The dataset contains two parts, a training set and a validation set. Each of these directories holds 10 subfolders labeled n0–n9, one per monkey species. The images are 400x300 pixels or larger and in JPEG format (nearly 1400 images in total).
The details are as follows:
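The directory layout looks roughly like this (inferred from the paths used later in the notebook):
training/training/
    n0/ ... n9/            # one subfolder per monkey species, JPEG images inside
validation/validation/
    n0/ ... n9/
monkey_labels.txt          # maps the folder labels n0–n9 to species names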
You can train the model directly on the Kaggle site, using the GPU the platform provides:
Just click New Notebook under the corresponding dataset.
Then the GPU can be turned on in the notebook settings. If you are not logged in, the interface will not look like this, so register an account and log in first.
The first code cell reads the dataset file paths directly.
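A minimal sketch of such a cell, in the style of Kaggle's default starter code, which walks /kaggle/input and prints every file path:
import os
# Walk the read-only /kaggle/input directory and print every file the dataset provides
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))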
Then import the libraries commonly used for deep learning and print their versions:
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import sklearn
import sys
import tensorflow as tf
import time
from tensorflow import keras

print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)
The output is as follows:
Check that the corresponding files exist and list their subfolder names:
train_dir = "/kaggle/input/10-monkey-species/training/training"
valid_dir = "/kaggle/input/10-monkey-species/validation/validation"
label_file = "/kaggle/input/10-monkey-species/monkey_labels.txt"
print(os.path.exists(train_dir))
print(os.path.exists(valid_dir))
print(os.path.exists(label_file))
print(os.listdir(train_dir))
print(os.listdir(valid_dir))
The output is as follows:
Read the label file:
labels = pd.read_csv(label_file, header = 0)
print(labels)
The output is as follows:
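The printed table maps the folder labels n0–n9 to Latin and common names. If you later want human-readable names (e.g. for plot titles), a sketch of building that mapping — the column names 'Label' and 'Common Name' and the whitespace padding are assumptions about monkey_labels.txt:
# Column names in monkey_labels.txt are padded with spaces (assumption), so strip them first
labels.columns = [c.strip() for c in labels.columns]
# Map folder label (n0 ... n9) -> common species name
name_by_label = dict(zip(labels['Label'].str.strip(),
                         labels['Common Name'].str.strip()))
print(name_by_label)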
# Resize all images to the same size.
height = 128
width = 128
channels = 3
batch_size = 64
num_classes = 10

# Read the data with augmentation applied, which improves robustness.
train_datagen = keras.preprocessing.image.ImageDataGenerator(
    # divide every pixel value by 255
    rescale = 1./255,
    # random rotation within this range
    rotation_range = 40,
    # horizontal shift
    width_shift_range = 0.2,
    # vertical shift
    height_shift_range = 0.2,
    # shear intensity
    shear_range = 0.2,
    # zoom intensity
    zoom_range = 0.2,
    # whether to apply horizontal flips
    horizontal_flip = True,
    # fill newly created pixels with the nearest value
    fill_mode = "nearest"
)
# Read the training data
train_generator = train_datagen.flow_from_directory(train_dir,
                                                    target_size = (height, width),
                                                    batch_size = batch_size,
                                                    # random seed
                                                    seed = 7,
                                                    shuffle = True,
                                                    # label encoding: one-hot here
                                                    class_mode = "categorical")
# For the validation set only rescale, no augmentation
valid_datagen = keras.preprocessing.image.ImageDataGenerator(rescale = 1./255)
# Read the validation set
valid_generator = valid_datagen.flow_from_directory(valid_dir,
                                                    target_size = (height, width),
                                                    batch_size = batch_size,
                                                    seed = 7,
                                                    # no need to shuffle the validation set
                                                    shuffle = False,
                                                    class_mode = 'categorical')
# Number of images in the training and validation sets
train_num = train_generator.samples
valid_num = valid_generator.samples
print(train_num, valid_num)
The output is as follows:
Display the shapes of the images and labels for 2 batches of the training set:
for i in range(2):
    x, y = train_generator.next()
    print(x.shape, y.shape)
    print(y)
The output is as follows:
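To eyeball the augmentation, an optional sketch that displays the first few images of one augmented batch from the generator defined above:
# Grab one augmented batch and show the first 8 images with their class indices
x, y = train_generator.next()
plt.figure(figsize=(12, 6))
for i in range(8):
    plt.subplot(2, 4, i + 1)
    plt.imshow(x[i])                 # pixels were rescaled to [0, 1], so imshow can display them directly
    plt.title(int(np.argmax(y[i])))  # class index decoded from the one-hot label
    plt.axis('off')
plt.show()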
# Build the model. SELU did not train well in this CNN, so ReLU is used instead.
model = keras.models.Sequential([
    keras.layers.Conv2D(filters=32, kernel_size=3, padding='same',
                        activation="relu", input_shape=[width, height, channels]),
    keras.layers.Conv2D(filters=32, kernel_size=3, padding='same', activation="relu"),
    keras.layers.MaxPool2D(pool_size=2),
    keras.layers.Conv2D(filters=64, kernel_size=3, padding='same', activation="relu"),
    keras.layers.Conv2D(filters=64, kernel_size=3, padding='same', activation="relu"),
    keras.layers.MaxPool2D(pool_size=2),
    keras.layers.Conv2D(filters=128, kernel_size=3, padding='same', activation="relu"),
    keras.layers.Conv2D(filters=128, kernel_size=3, padding='same', activation="relu"),
    keras.layers.MaxPool2D(pool_size=2),
    # fully connected layers
    keras.layers.Flatten(),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(num_classes, activation='softmax')
])
# Compile the model
model.compile(loss="categorical_crossentropy",
              optimizer="adam",
              metrics = ["accuracy"])
model.summary()
The output is as follows (partial screenshot):
Next, the model can be trained directly:
epochs = 30
# The data comes from a generator, so plain fit on in-memory arrays is not used here
history = model.fit_generator(train_generator,
                              steps_per_epoch = train_num // batch_size,
                              epochs = epochs,
                              validation_data = valid_generator,
                              validation_steps = valid_num // batch_size)
The output is as follows (partial screenshot):
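A side note: in recent TensorFlow 2.x releases fit_generator is deprecated and model.fit accepts generators directly, so an equivalent call would look like this:
history = model.fit(train_generator,
                    steps_per_epoch = train_num // batch_size,
                    epochs = epochs,
                    validation_data = valid_generator,
                    validation_steps = valid_num // batch_size)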
Plot the learning curves:
print(history.history.keys())
def plot_learning_curves(history, label, epochs, min_value, max_value):
    data = {}
    data[label] = history.history[label]
    data['val_' + label] = history.history['val_' + label]
    pd.DataFrame(data).plot(figsize=(8, 5))
    # show the grid
    plt.grid(True)
    # x axis: 0 to epochs, y axis: min_value to max_value
    plt.axis([0, epochs, min_value, max_value])
    plt.show()
# plot accuracy and loss separately
plot_learning_curves(history, 'accuracy', epochs, 0, 1)
plot_learning_curves(history, 'loss', epochs, 1.5, 2.5)
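If you want to keep this baseline CNN around before moving on to transfer learning, it can be saved with the standard Keras API (the filename below is arbitrary):
# Save the trained baseline model in HDF5 format
model.save('monkey_cnn_baseline.h5')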
Fine-tuning on ResNet50 (ResNet50 is a residual network with 50 layers):
Only the model-building code block changes; the code before it is the same as above.
resnet50_fine_tune = keras.models.Sequential()
# A classic residual CNN; include_top = False drops the original top (output) layers
resnet50_fine_tune.add(keras.applications.ResNet50(include_top = False,
                                                   pooling = 'avg',
                                                   weights = 'imagenet'))
resnet50_fine_tune.add(keras.layers.Dense(num_classes, activation = 'softmax'))
# Only the weights of the new top layer are trained
resnet50_fine_tune.layers[0].trainable = False
# Compile the model
resnet50_fine_tune.compile(loss="categorical_crossentropy",
                           optimizer="sgd", metrics = ["accuracy"])
resnet50_fine_tune.summary()
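An optional sanity check: with the ResNet50 base frozen, only the new Dense layer should contribute trainable weights.
# Expect 2 trainable variables: the Dense layer's kernel and bias
print(len(resnet50_fine_tune.trainable_variables))
print(resnet50_fine_tune.layers[0].trainable)   # False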
epochs = 10
# The data comes from a generator, so plain fit on in-memory arrays is not used here
history = resnet50_fine_tune.fit_generator(train_generator,
                                           steps_per_epoch = train_num // batch_size,
                                           epochs = epochs,
                                           validation_data = valid_generator,
                                           validation_steps = valid_num // batch_size)
print(history.history.keys())
def plot_learning_curves(history, label, epochs, min_value, max_value):
    data = {}
    data[label] = history.history[label]
    data['val_' + label] = history.history['val_' + label]
    pd.DataFrame(data).plot(figsize=(8, 5))
    # show the grid
    plt.grid(True)
    # x axis: 0 to epochs, y axis: min_value to max_value
    plt.axis([0, epochs, min_value, max_value])
    plt.show()
# plot accuracy and loss separately
plot_learning_curves(history, 'accuracy', epochs, 0, 1)
plot_learning_curves(history, 'loss', epochs, 0, 2)
The previous model uses the ResNet50 weights as-is, without updating them. Now we want the last 5 layers of ResNet50 to be trained as well, so their weights can change.
resnet50 = keras.applications.ResNet50(include_top = False,
                                       pooling = 'avg',
                                       weights = 'imagenet')
# Freeze every layer before the last 5, so only those 5 are trained
for layer in resnet50.layers[0:-5]:
    layer.trainable = False
resnet50_new = keras.models.Sequential([
    resnet50,
    keras.layers.Dense(num_classes, activation = 'softmax'),
])
resnet50_new.compile(loss="categorical_crossentropy",
                     optimizer="sgd", metrics = ["accuracy"])
resnet50_new.summary()
epochs = 10
history = resnet50_new.fit_generator(train_generator,
steps_per_epoch = train_num // batch_size,
epochs=epochs,
validation_data = valid_generator,
validation_steps = valid_num // batch_size
)
def plot_learning_curves(history, label, epochs, min_value, max_value):
    data = {}
    data[label] = history.history[label]
    data['val_' + label] = history.history['val_' + label]
    pd.DataFrame(data).plot(figsize=(8, 5))
    # show the grid
    plt.grid(True)
    # x axis: 0 to epochs, y axis: min_value to max_value
    plt.axis([0, epochs, min_value, max_value])
    plt.show()
# plot accuracy and loss separately
plot_learning_curves(history, 'accuracy', epochs, 0, 1)
plot_learning_curves(history, 'loss', epochs, 0, 2)
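Finally, a sketch of how the fine-tuned model could be scored on the validation generator (this assumes a TensorFlow 2.x version where evaluate and predict accept generators; the variable names follow the code above):
# Overall validation loss and accuracy
loss, acc = resnet50_new.evaluate(valid_generator, steps = valid_num // batch_size)
print(loss, acc)

# Per-image predictions; valid_generator is not shuffled, so the order matches valid_generator.classes
valid_generator.reset()
pred_classes = np.argmax(resnet50_new.predict(valid_generator), axis=1)
print((pred_classes == valid_generator.classes).mean())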