当前位置:   article > 正文

台式机人工智能GPU实践_ai人工智能服务器gpu实验

ai人工智能服务器gpu实验

总结 - Training | Microsoft Learn 博主在微软教程中有引用,同学们要是想深入研究可以自行追溯

本博客从零带大家搭建并跑起一个简单的深度学习模型(强学校相关性,弱迁移性)

以下是详细代码:

第1步:引用+下载数据集

  1. import torch
  2. from torch.utils.data import Dataset
  3. from torchvision import datasets
  4. from torchvision.transforms import ToTensor, Lambda
  5. import matplotlib.pyplot as plt
  6. import os
  7. os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
  8. training_data = datasets.FashionMNIST(
  9. root="data",
  10. train=True,
  11. download=True,
  12. transform=ToTensor()
  13. )
  14. test_data = datasets.FashionMNIST(
  15. root="data",
  16. train=False,
  17. download=True,
  18. transform=ToTensor()
  19. )

第2步:

  1. labels_map = {
  2. 0: "T-Shirt",
  3. 1: "Trouser",
  4. 2: "Pullover",
  5. 3: "Dress",
  6. 4: "Coat",
  7. 5: "Sandal",
  8. 6: "Shirt",
  9. 7: "Sneaker",
  10. 8: "Bag",
  11. 9: "Ankle Boot",
  12. }
  13. figure = plt.figure(figsize=(8, 8))
  14. cols, rows = 3, 3
  15. for i in range(1, cols * rows + 1):
  16. sample_idx = torch.randint(len(training_data), size=(1,)).item()
  17. img, label = training_data[sample_idx]
  18. figure.add_subplot(rows, cols, i)
  19. plt.title(labels_map[label])
  20. plt.axis("off")
  21. plt.imshow(img.squeeze(), cmap="gray")
  22. plt.show()

第3步:

  1. from torch.utils.data import DataLoader
  2. train_dataloader = DataLoader(training_data, batch_size=64, shuffle=True)
  3. test_dataloader = DataLoader(test_data, batch_size=64, shuffle=True)
  4. for X, y in test_dataloader:
  5. print("Shape of X [N, C, H, W]: ", X.shape)
  6. print("Shape of y: ", y.shape, y.dtype)
  7. break
  8. # Display sample data
  9. figure = plt.figure(figsize=(10, 8))
  10. cols, rows = 5, 5
  11. for i in range(1, cols * rows + 1):
  12. idx = torch.randint(len(test_data), size=(1,)).item()
  13. img, label = test_data[idx]
  14. figure.add_subplot(rows, cols, i)
  15. plt.title(label)
  16. plt.axis("off")
  17. plt.imshow(img.squeeze(), cmap="gray")
  18. plt.show()

第4步:Dataset 简而言之就是批次处理数据集

  1. from torchvision import datasets
  2. from torchvision.transforms import ToTensor, Lambda
  3. ds = datasets.FashionMNIST(
  4. root="data",
  5. train=True,
  6. download=True,
  7. transform=ToTensor(),
  8. target_transform=Lambda(lambda y: torch.zeros(10, dtype=torch.float).scatter_(0, torch.tensor(y), value=1))
  9. )

第5步:!!!!

  1. device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
  2. print('Using {} device'.format(device))

我们可以按 Win+R,输入 cmd 并回车,调出终端 Terminal

输入 nvidia-smi

查看电脑显卡调用情况 学校的台式机只有一个GPU 0卡,所以就cuda:0即可

例:

第6步:设定Model模型

  1. from torch import nn
  2. class NeuralNetwork(nn.Module):
  3. def __init__(self):
  4. super(NeuralNetwork, self).__init__()
  5. self.flatten = nn.Flatten()
  6. self.linear_relu_stack = nn.Sequential(
  7. nn.Linear(28*28, 512),
  8. nn.ReLU(),
  9. nn.Linear(512, 512),
  10. nn.ReLU(),
  11. nn.Linear(512, 10),
  12. nn.ReLU()
  13. )
  14. def forward(self, x):
  15. x = self.flatten(x)
  16. logits = self.linear_relu_stack(x)
  17. return logits
  18. model = NeuralNetwork().to(device)
  19. print(model)

第7步:设定损失、学习率、迭代器等变量以及训练测试模型

  1. loss_fn = nn.CrossEntropyLoss()
  2. learning_rate = 1e-3
  3. optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
  4. def train(dataloader, model, loss_fn, optimizer):
  5. size = len(dataloader.dataset)
  6. for batch, (X, y) in enumerate(dataloader):
  7. X, y = X.to(device), y.to(device)
  8. # Compute prediction error
  9. pred = model(X)
  10. loss = loss_fn(pred, y)
  11. # Backpropagation
  12. optimizer.zero_grad()
  13. loss.backward()
  14. optimizer.step()
  15. if batch % 100 == 0:
  16. loss, current = loss.item(), batch * len(X)
  17. print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
  18. def test(dataloader, model):
  19. size = len(dataloader.dataset)
  20. model.eval()
  21. test_loss, correct = 0, 0
  22. with torch.no_grad():
  23. for X, y in dataloader:
  24. X, y = X.to(device), y.to(device)
  25. pred = model(X)
  26. test_loss += loss_fn(pred, y).item()
  27. correct += (pred.argmax(1) == y).type(torch.float).sum().item()
  28. test_loss /= size
  29. correct /= size
  30. print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")

第8步:

  1. epochs = 15
  2. for t in range(epochs):
  3. print(f"Epoch {t+1}\n-------------------------------")
  4. train(train_dataloader, model, loss_fn, optimizer)
  5. test(test_dataloader, model)
  6. print("Done!")

最后,存储模型参数,供之后使用

  1. torch.save(model.state_dict(), "data/model.pth")
  2. print("Saved PyTorch Model State to model.pth")
  3. model = NeuralNetwork()
  4. model.load_state_dict(torch.load("data/model.pth"))
  5. classes = [
  6. "T-shirt/top",
  7. "Trouser",
  8. "Pullover",
  9. "Dress",
  10. "Coat",
  11. "Sandal",
  12. "Shirt",
  13. "Sneaker",
  14. "Bag",
  15. "Ankle boot",
  16. ]
  17. model.eval()
  18. x, y = test_data[0][0], test_data[0][1]
  19. with torch.no_grad():
  20. pred = model(x)
  21. predicted, actual = classes[pred[0].argmax(0)], classes[y]
  22. print(f'Predicted: "{predicted}", Actual: "{actual}"')

以上就是全部过程,感谢大家

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/我家自动化/article/detail/758968
推荐阅读
相关标签
  

闽ICP备14008679号