import numpy as np
import pandas as pd
import torch
import torchvision
import pickle
import matplotlib
from torch import nn
from torch.nn import functional as F
from IPython import display
from matplotlib import pyplot as plt
Train a multi-class classification model on the Fashion MNIST dataset. For details on the dataset itself, refer to the Fashion MNIST dataset usage notes. Because of hardware constraints, a random subset of the dataset is sampled and used to train the model.
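The pickled files ./train_data and ./test_data are assumed to be dictionaries holding 'data' and 'labels' tensors, matching how they are unpacked in the next block. A minimal sketch of how such a random subset could be produced from torchvision's FashionMNIST is shown below; the subset size and the root='./data' download directory are illustrative choices, not part of the original post.

# Sketch: build a random Fashion MNIST subset and pickle it
# (assumed format: a dict with 'data' and 'labels' tensors, as loaded below)
import pickle
import torch
import torchvision
from torchvision import transforms

full_train = torchvision.datasets.FashionMNIST(
    root='./data', train=True, download=True,
    transform=transforms.ToTensor())

num_samples = 10000                                         # illustrative subset size
idx = torch.randperm(len(full_train))[:num_samples]
data = torch.stack([full_train[i][0] for i in idx])         # shape (N, 1, 28, 28)
labels = torch.tensor([full_train[i][1] for i in idx])      # shape (N,)

with open('./train_data', 'wb') as fp:
    pickle.dump({'data': data, 'labels': labels}, fp)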
# Read the pickled samples
with open('./train_data', 'rb') as fp:
    Train = pickle.load(fp)
with open('./test_data', 'rb') as fp:
    Test = pickle.load(fp)

# Prepare the datasets and data loaders
train_data = torch.utils.data.TensorDataset(Train['data'], Train['labels'])
test_data = torch.utils.data.TensorDataset(Test['data'], Test['labels'])

batch_size = 1000
train_Loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True)
test_Loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=True)
# Define a simple multilayer perceptron
class MyMLP(nn.Module):
    def __init__(self, input_size, out_size):
        super().__init__()
        self.flatten = nn.Flatten()
        self.hidden1 = nn.Linear(input_size, 700)
        self.hidden2 = nn.Linear(700, 300)
        self.out = nn.Linear(300, out_size)

    def forward(self, X):
        h1 = self.flatten(X)
        h2 = F.relu(self.hidden1(h1))
        h3 = F.relu(self.hidden2(h2))
        out = self.out(h3)
        return out

net = MyMLP(28*28, 10)
num_epochs, lr = 40, 0.1
loss = nn.CrossEntropyLoss(reduction='none')
trainer = torch.optim.SGD(net.parameters(), lr=lr)
Source: 动手学深度学习 (Dive into Deep Learning).
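As a quick sanity check (not part of the original code), a dummy batch can be passed through the network to confirm the output shape:

# Sanity check (illustrative): a batch of 4 fake images -> 4 x 10 logits
dummy = torch.randn(4, 1, 28, 28)
print(net(dummy).shape)   # expected: torch.Size([4, 10])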
Accumulator: keeps running sums of sample-related statistics.
# Accumulator used by the evaluation and training loops below
class accumulator:
    """Accumulate sums over n variables."""
    def __init__(self, n):
        self.data = [0.0] * n

    def add(self, *args):
        self.data = [a + float(b) for a, b in zip(self.data, args)]

    def reset(self):
        self.data = [0.0] * len(self.data)

    def __getitem__(self, index):
        return self.data[index]
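For intuition, the accumulator can be used like this (the numbers are made up, just to show the call pattern):

# Illustrative use: accumulate (correct predictions, sample count) over two batches
metric = accumulator(2)
metric.add(812, 1000)
metric.add(790, 1000)
print(metric[0] / metric[1])   # running accuracy: 0.801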
Training progress display: plot the training metrics dynamically as the epochs proceed.
def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):
    """Set the axes for matplotlib. Defined in :numref:`sec_calculus`"""
    axes.set_xlabel(xlabel)
    axes.set_ylabel(ylabel)
    axes.set_xscale(xscale)
    axes.set_yscale(yscale)
    axes.set_xlim(xlim)
    axes.set_ylim(ylim)
    if legend:
        axes.legend(legend)
    axes.grid()

# Plotting helper that redraws the figure each time a point is added
class Animator:
    def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None,
                 ylim=None, xscale='linear', yscale='linear',
                 fmts=('-', 'm--', 'g-', 'r:'), nrows=1, ncols=1,
                 figsize=(3.5, 2.5)):
        if legend is None:
            legend = []
        display.set_matplotlib_formats('svg')
        self.fig, self.axes = plt.subplots(nrows, ncols, figsize=figsize)
        if nrows * ncols == 1:
            self.axes = [self.axes, ]
        # Use a lambda function to capture arguments
        self.config_axes = lambda: set_axes(
            self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
        self.X, self.Y, self.fmts = None, None, fmts

    def add(self, x, y):
        # Normalize the incoming point(s) to lists
        if not hasattr(y, "__len__"):
            y = [y]
        n = len(y)
        if not hasattr(x, "__len__"):
            x = [x] * n
        # Initialize the nested lists according to the number of curves
        if not self.X:
            self.X = [[] for _ in range(n)]
        if not self.Y:
            self.Y = [[] for _ in range(n)]
        for i, (a, b) in enumerate(zip(x, y)):
            if a is not None and b is not None:
                self.X[i].append(a)
                self.Y[i].append(b)
        self.axes[0].cla()
        for x, y, fmt in zip(self.X, self.Y, self.fmts):
            self.axes[0].plot(x, y, fmt)
        self.config_axes()
        display.display(self.fig)
        display.clear_output(wait=True)
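A minimal standalone use of Animator outside the training loop might look like the following (the curve values are fabricated, only to show the call pattern; in a notebook each add call redraws the figure in place):

# Illustrative: plot a single made-up curve point by point
anim = Animator(xlabel='step', legend=['demo'], xlim=[1, 5], ylim=[0, 1])
for step in range(1, 6):
    anim.add(step, 1.0 / step)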
Evaluation function: classification accuracy.
# Evaluation helpers
def accuracy(y_hat, y):
    """Count the number of correct predictions."""
    if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
        y_hat = torch.argmax(y_hat, axis=1)
    cmp = y_hat.type(y.dtype) == y
    return float(torch.sum(cmp.type(y.dtype)))

def evaluate_accuracy(net, data_iter):
    """Compute the accuracy of the model on the given dataset."""
    if isinstance(net, torch.nn.Module):
        net.eval()
    # Accumulate (number of correct predictions, number of samples)
    metric = accumulator(2)
    with torch.no_grad():
        for X, y in data_iter:
            metric.add(accuracy(net(X), y), y.size()[0])
    return metric[0] / metric[1]
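A tiny worked example of accuracy (the tensors are illustrative): with three predictions of which two match the labels, the function returns 2.

# Illustrative: 2 of 3 predictions match the labels
y_hat = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
y = torch.tensor([1, 0, 0])
print(accuracy(y_hat, y))            # 2.0
print(accuracy(y_hat, y) / len(y))   # 0.666...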
# One training epoch
def train_model_epoch(net, train_iter, loss, updater):
    if isinstance(net, torch.nn.Module):
        net.train()
    # Accumulate (training loss, number of correct predictions, number of samples)
    metric = accumulator(3)
    for X, y in train_iter:
        # Compute gradients and update the parameters
        y_hat = net(X)
        l = loss(y_hat, y)
        if isinstance(updater, torch.optim.Optimizer):
            updater.zero_grad()
            l.sum().backward()
            updater.step()
        metric.add(float(l.sum()), accuracy(y_hat, y), y.size()[0])
    return metric[0] / metric[2], metric[1] / metric[2]

# Full training loop
def train_model(net, train_iter, test_iter, loss, num_epochs, trainer):
    animator = Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0.3, 0.9],
                        legend=['train loss', 'train acc', 'test acc'])
    for epoch in range(num_epochs):
        train_metrics = train_model_epoch(net, train_iter, loss, trainer)
        test_acc = evaluate_accuracy(net, test_iter)
        animator.add(epoch + 1, train_metrics + (test_acc,))
train_model(net, train_Loader, test_Loader, loss, num_epochs, trainer)
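After train_model returns, the final test accuracy can be read back directly with the helpers defined above (a small addition, not in the original post):

# Report the final test accuracy of the trained network
final_test_acc = evaluate_accuracy(net, test_Loader)
print(f'final test accuracy: {final_test_acc:.4f}')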
Training results: the Animator figure shows the train loss, train accuracy, and test accuracy curves over the 40 epochs.