
PyTorch Minimalist Introduction (12): Classifying a Four-Class Weather Dataset

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
import torchvision
# Python standard-library modules: os, shutil
import os
import shutil
"""
torchvision.datasets.ImageFolder   # 从分类的文件夹中创建dataset数据
"""
base_dir = r"./dataset/4weather"
if not os.path.isdir(base_dir):
    os.makedirs(base_dir)  # makedirs creates nested directories, mkdir only a single level
    train_dir = os.path.join(base_dir, "train")
    test_dir = os.path.join(base_dir, "test")  # os.path.join() joins path components
    os.mkdir(train_dir)
    os.mkdir(test_dir)
train_dir = os.path.join(base_dir, "train")
test_dir = os.path.join(base_dir, "test")  # os.path.join() joins path components
specises = ['cloudy', 'rain', 'shine', 'sunrise']
# Create the four class subfolders with a loop
creation = 0  # flag: set to 1 on the first run so the class directories get created
if creation == 1:
    for train_or_test in ['train', 'test']:
        for spec in specises:
            os.mkdir(os.path.join(base_dir, train_or_test, spec))

image_dir = r"./dataset2"
print("os.listdir(image_dir)",os.listdir(image_dir))
for i, img in enumerate(os.listdir(image_dir)):
    for spec in specises:  # check each class name
        if spec in img:    # the class name appears in the file name
            s = os.path.join(image_dir, img)  # path of the source image
            if i % 5 == 0:
                # every 5th image goes to the test set (roughly a 20% split)
                #                                  class  file name
                d = os.path.join(base_dir, "test", spec, img)
            else:
                #                                   class  file name
                d = os.path.join(base_dir, "train", spec, img)
            shutil.copy(s, d)
# Count how many images ended up in each folder
for train_or_test in ["train", "test"]:
    for spec in specises:
        # len() gives the number of files
        print(train_or_test, spec, len(os.listdir(os.path.join(base_dir, train_or_test, spec))))

from torchvision import transforms
transform = transforms.Compose([
    transforms.Resize((96, 96)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
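# With mean=0.5 and std=0.5 per channel, Normalize maps ToTensor's [0, 1] output
# to roughly [-1, 1] via x_norm = (x - 0.5) / 0.5; this is why (im + 1) / 2 is
# used further below to bring values back into [0, 1] for plotting.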

train_ds = torchvision.datasets.ImageFolder(train_dir, transform=transform)
test_ds= torchvision.datasets.ImageFolder(test_dir, transform=transform)
# Print the class names found by ImageFolder
print("train_ds.classes:\t", train_ds.classes)
# Class-to-index mapping
print("train_ds.class_to_idx:\t", train_ds.class_to_idx)
# Dataset sizes
print("len(train_ds):{}\t, len(test_ds):{}\t".format(len(train_ds), len(test_ds)))

BATCHSIZE = 16
# Create the DataLoaders; shuffle only the training data
train_dl = DataLoader(train_ds, batch_size=BATCHSIZE, shuffle=True)
test_dl = DataLoader(test_ds, batch_size=BATCHSIZE)

imgs, labels = next(iter(train_dl))
print("imgs.shape:\t", imgs.shape)
print("imgs[0]:\t", imgs[0].shape)

# permute reorders the dimensions from (C, H, W) to (H, W, C)
im = imgs[0].permute(1, 2, 0)
print("im.shape:\t", im.shape)
im = im.numpy() # convert to a NumPy array
print("type(im):\t", type(im))

im = (im + 1) / 2  # undo the normalization: map values back into the [0, 1] range
print("im.max():\t{} \t im.min():\t{}".format(im.max(), im.min()))

#plt.imshow(im)  # uncomment to display the image
#plt.show()      # uncomment to display the image

# Label of the first image
print("labels[0]:\t", labels[0])
# Map label indices back to class names
id_to_class = dict((v, k) for k, v in  train_ds.class_to_idx.items())
print("id_to_class:\t", id_to_class)

plt.figure(figsize=(12, 8))
# enumerate provides a running index for the subplot position
for i, (img, label) in enumerate(zip(imgs[:6], labels[:6])):
    img = (img.permute(1, 2, 0).numpy() + 1) / 2
    plt.subplot(2, 3, i+1)
    plt.title(label.item())
#    plt.imshow(img)  # uncomment to display the images
#    plt.show()       # uncomment to display the images

class Net(nn.Module):
    def __init__(self):
        super(Net, self). __init__()
        self.conv1 = nn.Conv2d(3, 16, 3)
        self.conv2 = nn.Conv2d(16, 32, 3)
        self.conv3 = nn.Conv2d(32, 64, 3)
        """
        添加Dropout和Dropout2d 
        """
        self.drop = nn.Dropout(0.5)
        self.drop2d = nn.Dropout2d(0.5)
        self.pool = nn.MaxPool2d(2, 2)
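        # Feature-map size feeding fc1 (no padding, stride-1 convs, 2x2 max pooling):
        # 96 -> conv1 -> 94 -> pool -> 47 -> conv2 -> 45 -> pool -> 22
        #    -> conv3 -> 20 -> pool -> 10, with 64 channels, i.e. 64*10*10 inputs.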
        self.fc1 = nn.Linear(64*10*10, 1024)  # flattened size, see the derivation above (or the shape error raised by model(imgs))
        self.fc2 = nn.Linear(1024, 256)
        self.fc3 = nn.Linear(256, 4)
    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = self.pool(x)
        x = F.relu(self.conv2(x))
        x = self.pool(x)
        x = F.relu(self.conv3(x))
        x = self.pool(x)
        x = self.drop2d(x)
       # print("x.size():\t", x.size())  # 这里是为了打印出全连接层的输入值,找到输入值后可注释
        x = x.view(-1, x.size(1)*x.size(2)*x.size(3))
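        # equivalent and a bit more explicit: x = x.view(x.size(0), -1)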
        x = F.relu(self.fc1(x))
        """
        添加Dropout层
        """
        x = self.drop(x)
        x = F.relu(self.fc2(x))
        x = self.drop(x)
        x = self.fc3(x)
        return x
model = Net()
# Run one batch through the model to check the output shape
preds = model(imgs)
# Input batch shape
print("imgs.shape:\t", imgs.shape) # torch.Size([16, 3, 96, 96])
# Output shape
print("preds.shape:\t", preds.shape) # torch.Size([16, 4]): one score per class, the argmax picks the largest
preds = torch.argmax(preds, 1)
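# argmax over dim 1 turns the (16, 4) scores into 16 predicted class indices;
# they can be mapped back to names with id_to_class, e.g.
#   [id_to_class[p.item()] for p in preds[:6]]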

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("device:\t", device)

model = model.to(device) # move the network to the GPU if available
loss_fn = nn.CrossEntropyLoss()
optim = torch.optim.Adam(model.parameters(), lr=0.001)
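# CrossEntropyLoss combines LogSoftmax and NLLLoss, so the model returns raw
# logits and no softmax layer is needed at the end of Net.
# Note: this rebinds the name "optim" and shadows the torch.optim module imported
# above; it still works here because Adam is referenced as torch.optim.Adam.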

"""""""""""""""""""""""""""""""""""""""""""""""""""
model.train()   训练模式
model.eval()    预测模式     # 主要影响dropout层 BN层
"""""""""""""""""""""""""""""""""""""""""""""""""""
def fit(epoch, model, trainloader, testloader):
    correct = 0
    total = 0
    running_loss = 0
    model.train() # switch the model to training mode
    for x, y in trainloader:
        x, y = x.to(device), y.to(device)
        y_pred = model(x)
        loss = loss_fn(y_pred, y)
        optim.zero_grad()
        loss.backward()
        optim.step()
        with torch.no_grad():
            y_pred = torch.argmax(y_pred, dim=1)
            correct += (y_pred == y).sum().item()
            total += y.size(0)
            running_loss += loss.item()

    epoch_loss = running_loss / len(trainloader.dataset)
    epoch_acc = correct / total
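    # running_loss accumulates per-batch mean losses, so dividing by the dataset
    # size scales the result down by roughly the batch size; that is why the
    # printed losses look small. Dividing by len(trainloader) would give the
    # mean loss per batch instead.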

    test_correct = 0
    test_total = 0
    test_running_loss = 0
    model.eval()  # switch the model to evaluation mode
    with torch.no_grad():
        for x, y in testloader:
            x, y = x.to(device), y.to(device)
            y_pred = model(x)
            loss = loss_fn(y_pred, y)
            y_pred = torch.argmax(y_pred, dim=1)
            test_correct += (y_pred == y).sum().item()
            test_total += y.size(0)
            test_running_loss += loss.item()

    epoch_test_loss = test_running_loss / len(testloader.dataset)
    epoch_test_acc = test_correct / test_total

    print('epoch: ', epoch,
          'loss: ', round(epoch_loss, 3),
          'accuracy:', round(epoch_acc, 3),
          'test_loss: ', round(epoch_test_loss, 3),
          'test_accuracy:', round(epoch_test_acc, 3)
          )

    return epoch_loss, epoch_acc, epoch_test_loss, epoch_test_acc

epochs = 30
train_loss = []
train_acc = []
test_loss = []
test_acc = []

for epoch in range(epochs):
    epoch_loss, epoch_acc, epoch_test_loss, epoch_test_acc = fit(epoch,
                                                                 model,
                                                                 train_dl,
                                                                 test_dl)
    train_loss.append(epoch_loss)
    train_acc.append(epoch_acc)
    test_loss.append(epoch_test_loss)
    test_acc.append(epoch_test_acc)

plt.plot(range(1, epochs+1), train_loss, label='train_loss')
plt.plot(range(1, epochs+1), test_loss, label="test_loss")
plt.legend()  # show the legend
plt.show()

plt.plot(range(1, epochs+1), train_acc, label='train_acc')
plt.plot(range(1, epochs+1), test_acc, label="test_acc")
plt.legend()
plt.show()

os.listdir(image_dir) ['cloudy1.jpg', 'cloudy10.jpg', 'cloudy100.jpg', 'cloudy101.jpg', 'cloudy102.jpg', 'cloudy103.jpg', 'cloudy104.jpg', 'cloudy105.jpg', 'cloudy106.jpg', 'cloudy107.jpg', 'cloudy108.jpg', 'cloudy109.jpg', 'cloudy11.jpg', 'cloudy110.jpg', 'cloudy111.jpg', 'cloudy112.jpg', 'cloudy113.jpg', 'cloudy114.jpg', 'cloudy115.jpg', 'cloudy116.jpg', 'cloudy117.jpg', 'cloudy118.jpg', 'cloudy119.jpg', 'cloudy12.jpg', 'cloudy120.jpg', 'cloudy121.jpg', 'cloudy122.jpg', 'cloudy123.jpg', 'cloudy124.jpg', 'cloudy125.jpg', 'cloudy126.jpg', 'cloudy202.jpg', 'cloudy203.jpg', 'cloudy204.jpg', 'cloudy205.jpg', 'cloudy206.jpg', 'cloudy207.jpg', 'cloudy208.jpg', 'cloudy209.jpg', 'cloudy21.jpg', 'cloudy210.jpg', 'cloudy211.jpg', 'cloudy212.jpg', 'cloudy213.jpg', 'cloudy214.jpg', 'cloudy215.jpg', 'cloudy216.jpg', 'cloudy217.jpg', 'cloudy218.jpg', 'cloudy219.jpg', 'cloudy22.jpg'.............]
train cloudy 240
train rain 172
train shine 202
train sunrise 286
test cloudy 60
test rain 43
test shine 51
test sunrise 71
train_ds.classes:	 ['cloudy', 'rain', 'shine', 'sunrise']
train_ds.class_to_idx:	 {'cloudy': 0, 'rain': 1, 'shine': 2, 'sunrise': 3}
len(train_ds):900	, len(test_ds):225	
imgs.shape:	 torch.Size([16, 3, 96, 96])
imgs[0]:	 torch.Size([3, 96, 96])
im.shape:	 torch.Size([96, 96, 3])
type(im):	 <class 'numpy.ndarray'>
im.max():	0.9960784316062927 	 im.min():	0.0
labels[0]:	 tensor(3)
id_to_class:	 {0: 'cloudy', 1: 'rain', 2: 'shine', 3: 'sunrise'}
imgs.shape:	 torch.Size([16, 3, 96, 96])
preds.shape:	 torch.Size([16, 4])
device:	 cuda:0
epoch:  0 loss:  0.056 accuracy: 0.567 test_loss:  0.039 test_accuracy: 0.778
epoch:  1 loss:  0.04 accuracy: 0.704 test_loss:  0.039 test_accuracy: 0.738
epoch:  2 loss:  0.036 accuracy: 0.759 test_loss:  0.036 test_accuracy: 0.791
epoch:  3 loss:  0.033 accuracy: 0.762 test_loss:  0.034 test_accuracy: 0.8
epoch:  4 loss:  0.033 accuracy: 0.784 test_loss:  0.038 test_accuracy: 0.769
epoch:  5 loss:  0.03 accuracy: 0.801 test_loss:  0.039 test_accuracy: 0.8
epoch:  6 loss:  0.032 accuracy: 0.818 test_loss:  0.055 test_accuracy: 0.804
epoch:  7 loss:  0.031 accuracy: 0.813 test_loss:  0.036 test_accuracy: 0.827
epoch:  8 loss:  0.029 accuracy: 0.823 test_loss:  0.039 test_accuracy: 0.827
epoch:  9 loss:  0.026 accuracy: 0.861 test_loss:  0.036 test_accuracy: 0.836
epoch:  10 loss:  0.026 accuracy: 0.841 test_loss:  0.041 test_accuracy: 0.853
epoch:  11 loss:  0.022 accuracy: 0.866 test_loss:  0.041 test_accuracy: 0.889
epoch:  12 loss:  0.026 accuracy: 0.85 test_loss:  0.044 test_accuracy: 0.778
epoch:  13 loss:  0.026 accuracy: 0.846 test_loss:  0.045 test_accuracy: 0.844
epoch:  14 loss:  0.022 accuracy: 0.858 test_loss:  0.029 test_accuracy: 0.867
epoch:  15 loss:  0.017 accuracy: 0.901 test_loss:  0.035 test_accuracy: 0.88
epoch:  16 loss:  0.017 accuracy: 0.903 test_loss:  0.036 test_accuracy: 0.884
epoch:  17 loss:  0.016 accuracy: 0.908 test_loss:  0.042 test_accuracy: 0.876
epoch:  18 loss:  0.016 accuracy: 0.92 test_loss:  0.026 test_accuracy: 0.902
epoch:  19 loss:  0.016 accuracy: 0.913 test_loss:  0.043 test_accuracy: 0.88
epoch:  20 loss:  0.015 accuracy: 0.924 test_loss:  0.052 test_accuracy: 0.893
epoch:  21 loss:  0.011 accuracy: 0.944 test_loss:  0.029 test_accuracy: 0.893
epoch:  22 loss:  0.01 accuracy: 0.952 test_loss:  0.035 test_accuracy: 0.938
epoch:  23 loss:  0.014 accuracy: 0.926 test_loss:  0.04 test_accuracy: 0.92
epoch:  24 loss:  0.01 accuracy: 0.943 test_loss:  0.04 test_accuracy: 0.916
epoch:  25 loss:  0.01 accuracy: 0.947 test_loss:  0.037 test_accuracy: 0.92
epoch:  26 loss:  0.01 accuracy: 0.95 test_loss:  0.044 test_accuracy: 0.92
epoch:  27 loss:  0.017 accuracy: 0.913 test_loss:  0.03 test_accuracy: 0.893
epoch:  28 loss:  0.011 accuracy: 0.941 test_loss:  0.039 test_accuracy: 0.907
epoch:  29 loss:  0.014 accuracy: 0.942 test_loss:  0.026 test_accuracy: 0.88
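
As a follow-up, the trained model can be used to classify a single new image. The snippet below is a minimal sketch rather than part of the original script: the file name some_weather_photo.jpg is a hypothetical placeholder, and it assumes the transform, model, device and id_to_class objects defined above are still in scope.

from PIL import Image

model.eval()  # switch off dropout for inference
img_path = "some_weather_photo.jpg"  # hypothetical example path, replace with a real image
with torch.no_grad():
    img = Image.open(img_path).convert("RGB")   # load and force 3 channels
    x = transform(img).unsqueeze(0).to(device)  # (1, 3, 96, 96) batch on the same device
    pred = torch.argmax(model(x), dim=1).item()
print("predicted class:", id_to_class[pred])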