import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torchvision.datasets import MNIST
from torch.utils.data import DataLoader


# Define the neural network model
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(784, 256)
        self.fc2 = nn.Linear(256, 128)
        self.fc3 = nn.Linear(128, 10)

    def forward(self, x):
        x = x.view(x.size(0), -1)  # flatten each 28x28 image into a 784-dim vector
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        x = self.fc3(x)
        return x


# Hyperparameters
learning_rate = 0.001
batch_size = 64
num_epochs = 10

# Load the MNIST dataset
train_dataset = MNIST(root='.', train=True, transform=transforms.ToTensor(), download=True)
test_dataset = MNIST(root='.', train=False, transform=transforms.ToTensor())
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

# Create the model and the optimizer
model = Net()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss()

# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward pass and optimization
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i + 1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(
                epoch + 1, num_epochs, i + 1, total_step, loss.item()))

# Evaluate the model on the test set
model.eval()
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        outputs = model(images)
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    print('Accuracy: {:.2f}%'.format(100 * correct / total))
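After training finishes, it is common to persist the learned weights so the model can be reused without retraining. The following is a minimal sketch, assuming the script above has already run; the file name 'mnist_net.pth' and the variable names are illustrative choices, not part of the original code.

# Save the trained weights (file name is an arbitrary example)
torch.save(model.state_dict(), 'mnist_net.pth')

# Reload them into a fresh Net instance for inference
restored = Net()
restored.load_state_dict(torch.load('mnist_net.pth'))
restored.eval()

with torch.no_grad():
    sample, label = test_dataset[0]          # one 1x28x28 image tensor and its label
    logits = restored(sample.unsqueeze(0))   # add a batch dimension before the forward pass
    print('predicted:', logits.argmax(dim=1).item(), 'actual:', label)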
Experiment results