当前位置:   article > 正文

卷积神经网络之猫狗分类 —— 猫狗大战(dogs vs cats)卷积神经网络分类
# 卷积神经网络初体验
import os, shutil
from datetime import datetime

from keras import models
from keras import layers
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt


def data_initial():
    """Build a small cats-vs-dogs dataset on disk and return Keras generators.

    Copies a 1000/500/500 train/validation/test split per class (cats and
    dogs) from the original Kaggle extraction directory into a compact
    dataset tree, then wraps the train and validation splits in rescaled
    ``ImageDataGenerator`` flows.

    Returns:
        tuple: ``(train_generator, validation_generator)`` — binary-class
        generators yielding batches of 20 images resized to 150x150.
    """
    # Directory where the original Kaggle dataset was extracted
    original_dataset_dir = r'D:\pycharm\DownloadData\cats_vs_dogs\train\train'
    # Root directory of the small subset built here
    base_dir = r'D:\pycharm\DownloadData\cats_vs_dogs\mini'

    # Train / validation / test split roots
    train_dir = os.path.join(base_dir, 'train')
    validation_dir = os.path.join(base_dir, 'validation')
    test_dir = os.path.join(base_dir, 'test')

    def _copy_range(species, start, stop, dst_dir):
        # Copy '<species>.<i>.jpg' for i in [start, stop) into dst_dir.
        # Creates dst_dir (and parents) if missing, so the function no
        # longer depends on manually pre-created directories; re-running
        # simply overwrites the copies (idempotent).
        os.makedirs(dst_dir, exist_ok=True)
        for i in range(start, stop):
            fname = '{}.{}.jpg'.format(species, i)
            shutil.copyfile(os.path.join(original_dataset_dir, fname),
                            os.path.join(dst_dir, fname))
        return dst_dir

    # 1000 training / 500 validation / 500 test images of cats
    train_cats_dir = _copy_range('cat', 0, 1000, os.path.join(train_dir, 'cats'))
    validation_cats_dir = _copy_range('cat', 1000, 1500, os.path.join(validation_dir, 'cats'))
    test_cats_dir = _copy_range('cat', 1500, 2000, os.path.join(test_dir, 'cats'))

    # 1000 training / 500 validation / 500 test images of dogs
    # (original comments claimed 1000 for validation/test; the ranges are 500)
    train_dogs_dir = _copy_range('dog', 0, 1000, os.path.join(train_dir, 'dogs'))
    validation_dogs_dir = _copy_range('dog', 1000, 1500, os.path.join(validation_dir, 'dogs'))
    test_dogs_dir = _copy_range('dog', 1500, 2000, os.path.join(test_dir, 'dogs'))

    # Sanity check: expect 1000/500/500 files per class
    print('total train_cat: ', len(os.listdir(train_cats_dir)))
    print('total validation_cat: ', len(os.listdir(validation_cats_dir)))
    print('total test_cat: ', len(os.listdir(test_cats_dir)))

    print('total train_dog: ', len(os.listdir(train_dogs_dir)))
    print('total validation_dog: ', len(os.listdir(validation_dogs_dir)))
    print('total test_dog: ', len(os.listdir(test_dogs_dir)))

    # Rescale pixel values from [0, 255] to [0, 1]
    train_datagen = ImageDataGenerator(rescale=1. / 255)
    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(
        directory=train_dir,
        target_size=(150, 150),
        batch_size=20,
        class_mode='binary'
    )
    validation_generator = test_datagen.flow_from_directory(
        directory=validation_dir,
        target_size=(150, 150),
        batch_size=20,
        class_mode='binary'
    )
    return train_generator, validation_generator


# 创建模型
def create_convnet_model():
    """Assemble the small convnet for binary cat/dog classification.

    Four Conv2D + MaxPooling stages (32/64/128/128 filters, 3x3 kernels,
    ReLU) feed a flattened 512-unit dense layer and a single sigmoid
    output unit. Prints the layer summary before returning.

    Returns:
        A compiled-ready ``Sequential`` model expecting 150x150 RGB input.
    """
    conv_filters = (32, 64, 128, 128)
    model = models.Sequential()
    # First conv stage pins the expected input shape: 150x150 RGB images.
    model.add(layers.Conv2D(conv_filters[0], (3, 3), activation='relu',
                            input_shape=(150, 150, 3)))
    model.add(layers.MaxPooling2D((2, 2)))
    # Remaining stages grow feature depth while 2x2 pooling halves space.
    for n_filters in conv_filters[1:]:
        model.add(layers.Conv2D(n_filters, (3, 3), activation='relu'))
        model.add(layers.MaxPooling2D((2, 2)))
    # Classifier head: flatten -> dense ReLU -> single sigmoid probability.
    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    # Echo the architecture for inspection.
    model.summary()
    return model


if __name__ == '__main__':
    # Data preprocessing: returns batch generators for train/validation.
    train_generator, validation_generator = data_initial()
    model = create_convnet_model()
    # Compile for binary classification with a small RMSprop learning rate.
    model.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(lr=1e-4), metrics=['acc'])

    # Fit from the batch generators, timing the whole run.
    start_time = datetime.now()
    history = model.fit_generator(
        train_generator,
        steps_per_epoch=100,  # 100 batches * 20 images = 2000 training images
        epochs=30,
        validation_data=validation_generator,
        validation_steps=50  # 50 batches * 20 images = 1000 validation images
    )
    finish_time = datetime.now()
    print('运行时间: ', (finish_time - start_time).seconds)
    # Persist the trained model for later reuse.
    model.save('cats_vs_dogs_small.h5')
    print(history.history)
    # Plot the loss and accuracy curves.
    acc = history.history['acc']
    val_acc = history.history['val_acc']
    loss = history.history['loss']
    # BUG FIX: originally read history.history['loss'] here, which plotted
    # the training loss twice and never showed the validation loss.
    val_loss = history.history['val_loss']
    epochs = range(1, len(acc) + 1)

    plt.plot(epochs, acc, 'bo', label='Training acc')
    plt.plot(epochs, val_acc, 'b', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.legend()
    plt.figure()

    plt.plot(epochs, loss, 'bo', label='Training loss')
    plt.plot(epochs, val_loss, 'b', label='Validation loss')
    plt.title('Training and validation loss')
    plt.legend()
    plt.show()
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
  • 31
  • 32
  • 33
  • 34
  • 35
  • 36
  • 37
  • 38
  • 39
  • 40
  • 41
  • 42
  • 43
  • 44
  • 45
  • 46
  • 47
  • 48
  • 49
  • 50
  • 51
  • 52
  • 53
  • 54
  • 55
  • 56
  • 57
  • 58
  • 59
  • 60
  • 61
  • 62
  • 63
  • 64
  • 65
  • 66
  • 67
  • 68
  • 69
  • 70
  • 71
  • 72
  • 73
  • 74
  • 75
  • 76
  • 77
  • 78
  • 79
  • 80
  • 81
  • 82
  • 83
  • 84
  • 85
  • 86
  • 87
  • 88
  • 89
  • 90
  • 91
  • 92
  • 93
  • 94
  • 95
  • 96
  • 97
  • 98
  • 99
  • 100
  • 101
  • 102
  • 103
  • 104
  • 105
  • 106
  • 107
  • 108
  • 109
  • 110
  • 111
  • 112
  • 113
  • 114
  • 115
  • 116
  • 117
  • 118
  • 119
  • 120
  • 121
  • 122
  • 123
  • 124
  • 125
  • 126
  • 127
  • 128
  • 129
  • 130
  • 131
  • 132
  • 133
  • 134
  • 135
  • 136
  • 137
  • 138
  • 139
  • 140
  • 141
  • 142
  • 143
  • 144
  • 145
  • 146
  • 147
  • 148
  • 149
  • 150
  • 151
  • 152
  • 153
  • 154
  • 155
  • 156
  • 157
  • 158
  • 159
  • 160
  • 161
  • 162
  • 163
  • 164
  • 165
  • 166
  • 167
  • 168
  • 169
  • 170
  • 171
  • 172
  • 173
  • 174
  • 175
  • 176
  • 177
  • 178
  • 179
  • 180
  • 181
  • 182
  • 183
  • 184
  • 185
  • 186

跑了接近四十分钟,明天敲优化。
在这里插入图片描述

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/我家小花儿/article/detail/385250
推荐阅读
相关标签
  

闽ICP备14008679号