
Implementing MNIST Classification with PyTorch (11)

(1) Introduction

The MNIST dataset comes from the United States' National Institute of Standards and Technology (NIST). The training set consists of digits handwritten by 250 different people, 50% of whom were high-school students and 50% staff of the Census Bureau. The test set contains handwritten digits in the same proportions.

Network model

The layer shapes below are written as width × height × channels and match the CNN class defined in the code; a shape-check sketch follows this list.

  • Input layer (28 × 28 × 1)
  • Convolution layer 1 (28 × 28 × 16)
  • Pooling layer 1 (14 × 14 × 16)
  • Convolution layer 2 (14 × 14 × 32)
  • Pooling layer 2 (7 × 7 × 32)
  • Fully connected output layer (10 classes; the softmax is applied inside the loss function rather than as a separate layer)
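A quick way to verify these shapes is to push a dummy batch through equivalent layers and print the result at each stage. This is a minimal sketch, assuming the same hyperparameters the code below uses (5×5 kernels, stride 1, padding 2, 2×2 max pooling):

    import torch
    import torch.nn as nn

    # dummy batch: one 1-channel 28x28 image
    x = torch.zeros(1, 1, 28, 28)

    conv1 = nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2)
    pool = nn.MaxPool2d(2)
    conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2)

    x = conv1(x)
    print(x.shape)   # torch.Size([1, 16, 28, 28])
    x = pool(x)
    print(x.shape)   # torch.Size([1, 16, 14, 14])
    x = conv2(x)
    print(x.shape)   # torch.Size([1, 32, 14, 14])
    x = pool(x)
    print(x.shape)   # torch.Size([1, 32, 7, 7])  -> flattened to 32 * 7 * 7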

(2) Code

# library
# standard library
import os

# third-party library
import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision
import matplotlib.pyplot as plt

# torch.manual_seed(1)    # reproducible

# Hyper Parameters
EPOCH = 1                # train the training data n times; to save time, we just train 1 epoch
BATCH_SIZE = 50
LR = 0.001               # learning rate
DOWNLOAD_MNIST = False

# Mnist digits dataset
if not os.path.exists('./mnist/') or not os.listdir('./mnist/'):
    # no mnist dir, or mnist is an empty dir
    DOWNLOAD_MNIST = True

train_data = torchvision.datasets.MNIST(
    root='./mnist/',
    train=True,                                      # this is training data
    transform=torchvision.transforms.ToTensor(),     # converts a PIL.Image or numpy.ndarray to a
                                                     # torch.FloatTensor of shape (C x H x W), normalized to [0.0, 1.0]
    download=DOWNLOAD_MNIST,
)
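# Note (version caveat, an assumption about your install): newer torchvision
# releases renamed these dataset attributes -- train_data/test_data became
# .data, and train_labels/test_labels became .targets. The lines below keep
# the older names from the original post; substitute the new names if a
# recent torchvision raises AttributeError.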
# plot one example
print(train_data.train_data.size())      # (60000, 28, 28)
print(train_data.train_labels.size())    # (60000)
plt.imshow(train_data.train_data[0].numpy(), cmap='gray')
plt.title('%i' % train_data.train_labels[0])
plt.show()

# Data Loader for easy mini-batch return in training; the image batch shape will be (50, 1, 28, 28)
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)

# pick 2000 samples to speed up testing
test_data = torchvision.datasets.MNIST(root='./mnist/', train=False)
# shape from (2000, 28, 28) to (2000, 1, 28, 28), values in range (0, 1)
test_x = torch.unsqueeze(test_data.test_data, dim=1).type(torch.FloatTensor)[:2000] / 255.
test_y = test_data.test_labels[:2000]
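# Note: the test images skip the ToTensor transform used for the training set,
# so the raw uint8 tensor is converted to float and divided by 255 manually
# above, matching what ToTensor does.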
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Sequential(          # input shape (1, 28, 28)
            nn.Conv2d(
                in_channels=1,               # input channels (grayscale)
                out_channels=16,             # n_filters
                kernel_size=5,               # filter size
                stride=1,                    # filter movement/step
                padding=2,                   # to keep the same width and height after Conv2d, use padding = (kernel_size - 1) / 2 when stride = 1
            ),                               # output shape (16, 28, 28)
            nn.ReLU(),                       # activation
            nn.MaxPool2d(kernel_size=2),     # take the max value in each 2x2 area, output shape (16, 14, 14)
        )
        self.conv2 = nn.Sequential(          # input shape (16, 14, 14)
            nn.Conv2d(16, 32, 5, 1, 2),      # output shape (32, 14, 14)
            nn.ReLU(),                       # activation
            nn.MaxPool2d(2),                 # output shape (32, 7, 7)
        )
        self.out = nn.Linear(32 * 7 * 7, 10) # fully connected layer, output 10 classes

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)            # flatten the output of conv2 to (batch_size, 32 * 7 * 7)
        output = self.out(x)
        return output, x                     # also return x for visualization

cnn = CNN()
print(cnn)  # net architecture
optimizer = torch.optim.RMSprop(cnn.parameters(), lr=LR)   # optimize all cnn parameters
loss_func = nn.CrossEntropyLoss()                          # the target label is not one-hotted
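# Note: nn.CrossEntropyLoss combines log-softmax with negative log-likelihood,
# which is why the network above ends in a plain Linear layer instead of an
# explicit softmax layer.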
# the following function (plot_with_labels) is for visualization; it can be ignored if not interested
from matplotlib import cm
try:
    from sklearn.manifold import TSNE
    HAS_SK = True
except ImportError:
    HAS_SK = False
    print('Please install sklearn for layer visualization')

def plot_with_labels(lowDWeights, labels):
    plt.cla()
    X, Y = lowDWeights[:, 0], lowDWeights[:, 1]
    for x, y, s in zip(X, Y, labels):
        c = cm.rainbow(int(255 * s / 9))
        plt.text(x, y, s, backgroundcolor=c, fontsize=9)
    plt.xlim(X.min(), X.max())
    plt.ylim(Y.min(), Y.max())
    plt.title('Visualize last layer')
    plt.show()
    plt.pause(0.01)

plt.ion()
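# plt.ion() switches matplotlib to interactive mode, so the plt.show() and
# plt.pause() calls inside plot_with_labels update the figure without
# blocking the training loop.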
# training and testing
for epoch in range(EPOCH):
    for step, (b_x, b_y) in enumerate(train_loader):   # gives batch data; x is normalized when iterating train_loader
        output = cnn(b_x)[0]             # cnn output
        loss = loss_func(output, b_y)    # cross entropy loss
        optimizer.zero_grad()            # clear gradients for this training step
        loss.backward()                  # backpropagation, compute gradients
        optimizer.step()                 # apply gradients

        if step % 50 == 0:
            test_output, last_layer = cnn(test_x)
            pred_y = torch.max(test_output, 1)[1].data.numpy()
            accuracy = float((pred_y == test_y.data.numpy()).astype(int).sum()) / float(test_y.size(0))
            print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.numpy(), '| test accuracy: %.2f' % accuracy)
            if HAS_SK:
                # visualization of the trained flattened layer (T-SNE)
                # (newer scikit-learn versions call the n_iter parameter max_iter)
                tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
                plot_only = 500
                low_dim_embs = tsne.fit_transform(last_layer.data.numpy()[:plot_only, :])
                labels = test_y.numpy()[:plot_only]
                plot_with_labels(low_dim_embs, labels)

plt.ioff()

# print 10 predictions from the test data
test_output, _ = cnn(test_x[:10])
pred_y = torch.max(test_output, 1)[1].data.numpy()
print(pred_y, 'prediction number')
print(test_y[:10].numpy(), 'real number')
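To reuse the trained network later without retraining, the weights can be saved and reloaded. This is a minimal sketch using PyTorch's standard state_dict API; the file name cnn_mnist.pth is an illustrative choice, not something from the original post:

    # save only the learned parameters (the state dict), the usual PyTorch practice
    torch.save(cnn.state_dict(), 'cnn_mnist.pth')

    # later: rebuild the architecture and load the weights back
    cnn2 = CNN()
    cnn2.load_state_dict(torch.load('cnn_mnist.pth'))
    cnn2.eval()  # switch to inference mode (a good habit, though this model has no dropout/batchnorm)
    test_output, _ = cnn2(test_x[:10])
    print(torch.max(test_output, 1)[1].data.numpy(), 'predictions from reloaded model')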

(3) Results

[Result figures omitted: running the script shows the sample digit plot, prints the periodic training-loss / test-accuracy log and the final ten predictions against the true labels, and, if scikit-learn is installed, displays the T-SNE visualization of the last layer.]

Note: the code in this post is mainly adapted from https://github.com/MorvanZhou
