When training a deep learning model, a model with a small computational load can be trained on the CPU. But when the computation is heavy, we want to exploit the GPU's parallel-computing power to speed up training.
Training a model on the GPU in PyTorch takes the following four steps:
1. Create the model.
2. Define the device.
3. Move the model to the GPU (the defined device).
4. Move the inputs and targets to the GPU.
The corresponding code is:
# 1. Create the model
model = Net()
# 2. Define the device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# 3. Move the model to the GPU (the defined device)
model.to(device)
# 4. Move the inputs and targets to the GPU
inputs, target = inputs.to(device), target.to(device)
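As a quick sanity check, you can confirm where a tensor (or a model's parameters) actually lives. This is a minimal sketch, not part of the original; the batch shape is just a dummy MNIST-shaped example:

import torch

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
x = torch.randn(4, 1, 28, 28).to(device)  # dummy batch of 4 MNIST-sized images
print(x.device)                           # prints cuda:0 when a GPU is available, else cpu
# For a model, inspect any of its parameters, e.g.:
# print(next(model.parameters()).device)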
The example below shows how each step is applied.
The original CPU version:
import torch
import torch.nn.functional as F

class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
        self.pooling = torch.nn.MaxPool2d(2)
        self.fc = torch.nn.Linear(320, 10)

    def forward(self, x):
        # Input is (n, 1, 28, 28); after two conv+pool blocks it is (n, 20, 4, 4)
        batch_size = x.size(0)
        x = F.relu(self.pooling(self.conv1(x)))
        x = F.relu(self.pooling(self.conv2(x)))
        x = x.view(batch_size, -1)  # flatten to (n, 320)
        x = self.fc(x)
        return x

model = Net()

def train(epoch):
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, target = data
        optimizer.zero_grad()
        # forward + backward + update
        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if batch_idx % 300 == 299:
            # average loss per batch over the last 300 batches
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0

def test():
    correct = 0
    total = 0
    with torch.no_grad():
        for data in test_loader:
            inputs, target = data
            outputs = model(inputs)
            _, predicted = torch.max(outputs.data, dim=1)
            total += target.size(0)
            correct += (predicted == target).sum().item()
    print('Accuracy on test set: %d %% [%d/%d]' % (100 * correct / total, correct, total))
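The code above assumes that train_loader, test_loader, criterion, and optimizer already exist. A minimal setup might look like the following sketch; the batch size, learning rate, momentum, and normalization constants are illustrative assumptions, not values given in the original:

import torch
from torchvision import datasets, transforms

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))  # commonly used MNIST statistics (assumed)
])
train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST(root='./data', train=False, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=64, shuffle=False)

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5)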
Now we apply the four steps given above to move the model to the GPU:
import torch
import torch.nn.functional as F

class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
        self.pooling = torch.nn.MaxPool2d(2)
        self.fc = torch.nn.Linear(320, 10)

    def forward(self, x):
        # Input is (n, 1, 28, 28); after two conv+pool blocks it is (n, 20, 4, 4)
        batch_size = x.size(0)
        x = F.relu(self.pooling(self.conv1(x)))
        x = F.relu(self.pooling(self.conv2(x)))
        x = x.view(batch_size, -1)  # flatten to (n, 320)
        x = self.fc(x)
        return x

model = Net()  # 1. Create the model (the CPU version needs this too)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  # 2. Define the device (GPU)
model.to(device)  # 3. Move the model to the GPU

def train(epoch):
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, target = data
        inputs, target = inputs.to(device), target.to(device)  # 4. Move the inputs and targets to the GPU
        optimizer.zero_grad()
        # forward + backward + update
        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if batch_idx % 300 == 299:
            # average loss per batch over the last 300 batches
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0

def test():
    correct = 0
    total = 0
    with torch.no_grad():
        for data in test_loader:
            inputs, target = data
            inputs, target = inputs.to(device), target.to(device)  # 4. Move the inputs and targets to the GPU
            outputs = model(inputs)
            _, predicted = torch.max(outputs.data, dim=1)
            total += target.size(0)
            correct += (predicted == target).sum().item()
    print('Accuracy on test set: %d %% [%d/%d]' % (100 * correct / total, correct, total))
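To drive training and evaluation, a typical main loop looks like the sketch below; the epoch count of 10 is an arbitrary assumption, not a value from the original:

if __name__ == '__main__':
    for epoch in range(10):  # 10 epochs chosen arbitrarily for illustration
        train(epoch)
        test()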