
Winter Break PyTorch Tools, Day 5


Course Notes

From model construction to AlexNet

 


Course Code

 

 


Homework

1. Using step-into debugging, start from the model construction line (net = LeNet(classes=2)) and step into every function that gets called; observe when net's _modules field is built and assigned, and record every class and function you enter (a sketch of the mechanism follows the example steps).

For example:

Step 1: net = LeNet(classes=2)

Step 2: LeNet class, __init__(), super(LeNet, self).__init__()

Step 3: Module class, ...

Step n: return net
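
A minimal sketch of the mechanism behind that trace (the LeNet layer sizes here are my own assumption, not taken from the course code): nn.Module.__init__() creates the empty _modules OrderedDict, and every later assignment of an nn.Module attribute goes through nn.Module.__setattr__, which stores the submodule in _modules.

import torch.nn as nn

class LeNet(nn.Module):
    def __init__(self, classes=2):
        # Step 2: super().__init__() runs nn.Module.__init__(),
        # which creates the empty self._modules OrderedDict.
        super(LeNet, self).__init__()
        # Each assignment below triggers nn.Module.__setattr__, which sees
        # that the value is an nn.Module and inserts it into self._modules.
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, classes)
        # forward() is omitted; it is not needed just to watch registration.

# Step 1: stepping into this line enters LeNet.__init__ above.
net = LeNet(classes=2)
# The five submodules appear in _modules in assignment order.
print(net._modules.keys())
# odict_keys(['conv1', 'conv2', 'fc1', 'fc2', 'fc3'])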

2. Rewrite AlexNet with the Sequential container, giving every layer in features a name, and print the names with the line below; one way to do this is sketched after the print statement.

print(alexnet._modules['features']._modules.keys())
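
One possible sketch (the layer names conv1/relu1/pool1/... are my own choice, not required by the assignment): nn.Sequential accepts an OrderedDict, so each layer keeps the name it is given instead of the default '0', '1', '2', ...

from collections import OrderedDict
import torch.nn as nn

class AlexNetNamed(nn.Module):
    def __init__(self, num_classes=2):
        super(AlexNetNamed, self).__init__()
        # Same layer stack as the features block further down,
        # but built from an OrderedDict so every layer is named.
        self.features = nn.Sequential(OrderedDict([
            ('conv1', nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2)),
            ('relu1', nn.ReLU(inplace=True)),
            ('pool1', nn.MaxPool2d(kernel_size=3, stride=2)),
            ('conv2', nn.Conv2d(64, 192, kernel_size=5, padding=2)),
            ('relu2', nn.ReLU(inplace=True)),
            ('pool2', nn.MaxPool2d(kernel_size=3, stride=2)),
            ('conv3', nn.Conv2d(192, 384, kernel_size=3, padding=1)),
            ('relu3', nn.ReLU(inplace=True)),
            ('conv4', nn.Conv2d(384, 256, kernel_size=3, padding=1)),
            ('relu4', nn.ReLU(inplace=True)),
            ('conv5', nn.Conv2d(256, 256, kernel_size=3, padding=1)),
            ('relu5', nn.ReLU(inplace=True)),
            ('pool3', nn.MaxPool2d(kernel_size=3, stride=2)),
        ]))

alexnet = AlexNetNamed()
# Prints the chosen names instead of '0', '1', '2', ...
print(alexnet._modules['features']._modules.keys())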

1. AlexNet

 

import os
import zipfile

# List the Kaggle input files
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# unzip
print(os.getcwd())
os.makedirs('data', exist_ok=True)
with zipfile.ZipFile('../input/dogs-vs-cats-redux-kernels-edition/train.zip') as train_zip:
    train_zip.extractall('data')
with zipfile.ZipFile('../input/dogs-vs-cats-redux-kernels-edition/test.zip') as test_zip:
    test_zip.extractall('data')

# show unzipped dirs
train_dir = './data/train'
test_dir = './data/test'
print('len:', len(os.listdir(train_dir)), len(os.listdir(test_dir)))
os.listdir(train_dir)[:5]
os.listdir(test_dir)[:5]

import numpy as np
import pandas as pd
import glob
import os
import torch
import matplotlib.pyplot as plt
from PIL import Image
from sklearn.model_selection import train_test_split
from torchvision import datasets, models, transforms
import torch.nn as nn
import torch.optim as optim

batch_size = 100
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(device)
torch.manual_seed(1234)
if device == 'cuda':
    torch.cuda.manual_seed_all(1234)
lr = 0.001

train_list = glob.glob(os.path.join(train_dir, '*.jpg'))
test_list = glob.glob(os.path.join(test_dir, '*.jpg'))
print('show data:', len(train_list), train_list[:3])
print('show data:', len(test_list), test_list[:3])

# Show one sample image
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
img = Image.open(train_list[0])
plt.imshow(img)
plt.axis('off')
plt.show()
print(type(img))
img_np = np.asarray(img)
print(img_np.shape)

# 80/20 split of the labeled images into train and validation lists
train_list, val_list = train_test_split(train_list, test_size=0.2)
print(len(train_list), train_list[:3])
print(len(val_list), val_list[:3])

train_transforms = transforms.Compose([
    transforms.Resize((224, 224)),
    # transforms.RandomCrop(224),
    transforms.ToTensor(),
])
val_transforms = transforms.Compose([
    transforms.Resize((224, 224)),
    # transforms.RandomCrop(224),
    transforms.ToTensor(),
])
test_transforms = transforms.Compose([
    transforms.Resize((224, 224)),
    # transforms.RandomCrop(224),
    transforms.ToTensor(),
])


class dataset(torch.utils.data.Dataset):
    def __init__(self, file_list, now_transform):
        self.file_list = file_list  # list of image paths
        self.transform = now_transform

    def __len__(self):
        self.filelength = len(self.file_list)
        return self.filelength

    def __getitem__(self, idx):
        img_path = self.file_list[idx]
        img = Image.open(img_path)
        # print(img.size)
        img_transformed = self.transform(img)
        # The test set has no labels, so this parsing only works for train/val files
        label = img_path.split('/')[-1].split('.')[0]
        if label == 'dog':
            label = 1
        elif label == 'cat':
            label = 0
        else:
            assert False
        return img_transformed, label


train_data = dataset(train_list, train_transforms)
val_data = dataset(val_list, test_transforms)
# test_data = dataset(test_list, transform=test_transforms)
train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
val_loader = torch.utils.data.DataLoader(dataset=val_data, batch_size=batch_size, shuffle=True)
# test_loader = torch.utils.data.DataLoader(dataset=test_data, batch_size=batch_size, shuffle=True)
print(len(train_data), len(train_loader))
print(len(val_data), len(val_loader))
print(train_data, type(train_data))
t1, t2 = train_data[7]
print(t1, t2)
print(type(t1))
print(t1.shape)


class AlexNet(nn.Module):
    def __init__(self, num_classes=2):
        super(AlexNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x


# model = CNN_STD().to(device)
model = AlexNet().to(device)
model.train()
print(model._modules.keys())
print(model._modules['features'][0])

optimizer = optim.Adam(params=model.parameters(), lr=lr)
loss_f = nn.CrossEntropyLoss()
epochs = 10
print('start epoch iter, please wait...')
for epoch in range(epochs):
    epoch_loss = 0
    epoch_accuracy = 0
    for data, label in train_loader:
        data = data.to(device)
        label = label.to(device)
        output = model(data)
        loss = loss_f(output, label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        acc = ((output.argmax(dim=1) == label).float().mean())
        epoch_accuracy += acc / len(train_loader)
        epoch_loss += loss / len(train_loader)
    print('Epoch : {}, train accuracy : {}, train loss : {}'.format(epoch + 1, epoch_accuracy, epoch_loss))

    with torch.no_grad():
        epoch_val_accuracy = 0
        epoch_val_loss = 0
        for data, label in val_loader:
            data = data.to(device)
            label = label.to(device)
            val_output = model(data)
            val_loss = loss_f(val_output, label)
            acc = ((val_output.argmax(dim=1) == label).float().mean())
            epoch_val_accuracy += acc / len(val_loader)
            epoch_val_loss += val_loss / len(val_loader)
        print('Epoch : {}, val_accuracy : {}, val_loss : {}'.format(epoch + 1, epoch_val_accuracy, epoch_val_loss))

 

Note: training results on Dogs vs. Cats are not great; I am not sure whether the dataset is too small, the code has a bug, or the hyperparameters need tuning.
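
One thing that may be worth trying (an untested guess, not a confirmed fix): add basic augmentation and ImageNet-style normalization to train_transforms, since a from-scratch AlexNet on a dataset of this size tends to overfit quickly. The mean/std values below are the standard ImageNet statistics, not numbers tuned for Dogs vs. Cats.

from torchvision import transforms

# Hypothetical replacement for train_transforms above; the same Normalize
# would then also be appended to val_transforms so the two pipelines match.
train_transforms = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])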

 

 

 
