Linear Regression with PyTorch (Deep Learning)

1 The target function

        y = 2x + 4.2

Initialization

%matplotlib inline
import random
import torch
from d2l import torch as d2l

1.1 Generate the synthetic data

def synthetic_data(w, b, num_examples):
    """Generate y = Xw + b plus Gaussian noise."""
    x = torch.normal(0, 1, (num_examples, len(w)))
    y = torch.matmul(x, w) + b
    print('x:', x)
    print('y:', y)
    y += torch.normal(0, 0.01, y.shape)  # add noise
    return x, y.reshape((-1, 1))

true_w = torch.tensor([2.])
true_b = 4.2
print(f'true_w: {true_w}, true_b: {true_b}')
features, labels = synthetic_data(true_w, true_b, 10)
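
To sanity-check the generated data, a quick scatter plot of the single feature against the labels should show a roughly straight line with slope 2 and intercept 4.2. A minimal sketch, assuming the d2l.set_figsize() helper and the d2l.plt alias for matplotlib.pyplot that the d2l package provides:

# Visual check of the synthetic data (assumes d2l.plt wraps matplotlib.pyplot).
d2l.set_figsize()
d2l.plt.scatter(features[:, 0].detach().numpy(),
                labels.detach().numpy(), 10)  # 10 = marker size
d2l.plt.xlabel('x')
d2l.plt.ylabel('y')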

1.2 Convert the data into mini-batches

def data_iter(batch_size, features, labels):
    """Yield shuffled mini-batches of (features, labels)."""
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)
    for i in range(0, num_examples, batch_size):
        batch_indices = torch.tensor(indices[i: min(i + batch_size, num_examples)])
        yield features[batch_indices], labels[batch_indices]

batch_size = 10
for x, y in data_iter(batch_size, features, labels):
    print(f'x: {x}, \ny: {y}')
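
With only 10 examples and batch_size = 10, the loop above yields a single full batch per pass. A smaller batch size splits the data into several shuffled mini-batches, and the min(i + batch_size, num_examples) clause handles the final, possibly smaller batch. An illustrative run with a hypothetical batch size of 4:

# Illustrative only: 10 examples with batch_size = 4 give batches of 4, 4 and 2.
for x, y in data_iter(4, features, labels):
    print('batch shapes:', x.shape, y.shape)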

1.3 Initialize the weights

Random initialization: w is drawn from a normal distribution with mean 0 and standard deviation 0.01, and b is initialized to 0.

w = torch.normal(0, 0.01, size=(1, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
w, b
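
Both tensors are created with requires_grad=True, so autograd will track operations on them, but no gradients exist until the first backward() call. A quick illustrative check:

# Before any backward pass, .grad is still empty.
print(w.requires_grad, b.requires_grad)  # True True
print(w.grad, b.grad)                    # None None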

2 Run the training

Watch how the parameters change during training:

print(f'true_w: {true_w}, true_b: {true_b}')

def squared_loss(y_hat, y):
    """Per-example squared loss."""
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2

def linreg(x, w, b):
    """The linear regression model: y_hat = xw + b."""
    return torch.matmul(x, w) + b

def sgd(params, lr, batch_size):
    """Mini-batch stochastic gradient descent update."""
    with torch.no_grad():
        for param in params:
            # print('param:', param, 'param.grad:', param.grad)
            param -= lr * param.grad / batch_size
            param.grad.zero_()

lr = 0.03
num_epochs = 1000
for epoch in range(num_epochs):
    for x, y in data_iter(batch_size, features, labels):
        l = squared_loss(linreg(x, w, b), y)  # per-example loss on this mini-batch
        print('w:', w, 'b:', b)
        l.sum().backward()           # sum the losses, then compute gradients
        sgd([w, b], lr, batch_size)  # update w and b in place
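
After training, the estimated parameters should be close to the true values (true_w = 2, true_b = 4.2), up to the injected noise. A minimal sketch of the comparison:

# Compare the learned parameters with the ground truth.
print(f'error in w: {true_w - w.reshape(true_w.shape)}')
print(f'error in b: {true_b - b}')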

 


3 Test the gradient update

Initialize the data (same setup as in Section 1):

%matplotlib inline
import random
import torch
from d2l import torch as d2l

def synthetic_data(w, b, num_examples):
    x = torch.normal(0, 1, (num_examples, len(w)))
    y = torch.matmul(x, w) + b
    print('x:', x)
    print('y:', y)
    y += torch.normal(0, 0.01, y.shape)  # add noise
    return x, y.reshape((-1, 1))

true_w = torch.tensor([2.])
true_b = 4.2
print(f'true_w: {true_w}, true_b: {true_b}')
features, labels = synthetic_data(true_w, true_b, 10)

def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)
    for i in range(0, num_examples, batch_size):
        batch_indices = torch.tensor(indices[i: min(i + batch_size, num_examples)])
        yield features[batch_indices], labels[batch_indices]

batch_size = 10
for x, y in data_iter(batch_size, features, labels):
    print(f'x: {x}, \ny: {y}')

w = torch.normal(0, 0.01, size=(1, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
w, b

3.1 Test the update

print(f'true_w: {true_w}, true_b: {true_b}')

def squared_loss(y_hat, y):
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2

def linreg(x, w, b):
    return torch.matmul(x, w) + b

def sgd(params, lr, batch_size):
    with torch.no_grad():
        for param in params:
            print('param:', param, 'param.grad:', param.grad)
            # param -= lr * param.grad / batch_size
            # param.grad.zero_()

lr = 0.03
num_epochs = 2
for epoch in range(num_epochs):
    for x, y in data_iter(batch_size, features, labels):
        l = squared_loss(linreg(x, w, b), y)  # per-example loss
        print(f'\nepoch: {epoch}, w:', w, 'b:', b)
        l.sum().backward()           # compute gradients (no parameter update here)
        sgd([w, b], lr, batch_size)  # only prints the parameters and their gradients

With l.sum().backward() in place, the gradients of the loss with respect to w and b are computed and appear in param.grad, which is what the sgd function above prints.
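
PyTorch accumulates gradients: each backward() call adds to param.grad, which is exactly why the full training loop in Section 2 calls param.grad.zero_() after every update. A standalone sketch of that accumulation behavior, separate from the model above:

# Gradient accumulation in miniature.
t = torch.tensor([1.0], requires_grad=True)
(t * 2).sum().backward()
print(t.grad)   # tensor([2.])
(t * 2).sum().backward()
print(t.grad)   # tensor([4.]) - gradients add up until explicitly zeroed
t.grad.zero_()
print(t.grad)   # tensor([0.])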

Without calling l.sum().backward(), no gradients are ever computed, so param.grad remains None:

print(f'true_w: {true_w}, true_b: {true_b}')

def squared_loss(y_hat, y):
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2

def linreg(x, w, b):
    return torch.matmul(x, w) + b

def sgd(params, lr, batch_size):
    with torch.no_grad():
        for param in params:
            print('param:', param, 'param.grad:', param.grad)
            # param -= lr * param.grad / batch_size
            # param.grad.zero_()

lr = 0.03
num_epochs = 2
for epoch in range(num_epochs):
    for x, y in data_iter(batch_size, features, labels):
        l = squared_loss(linreg(x, w, b), y)  # per-example loss
        print(f'\nepoch: {epoch}, w:', w, 'b:', b)
        # l.sum().backward()         # gradient computation skipped
        sgd([w, b], lr, batch_size)  # prints param.grad, which is still None
        # break
