Dataset download link: https://pan.baidu.com/s/1N2fHG0r3YkiQvQ7VOUF7Hw
Extraction code: 8ulu
python 3.6
tensorflow 1.14.0
numpy 1.19.5
glob2 0.7
matplotlib 3.3.3
import tensorflow as tf
import numpy as np
import glob
import os  # used below to pull the class name out of each file path
import matplotlib.pyplot as plt
image_paths = glob.glob('./data/animals/*/*.jpg')  # collect every image path
all_label_names = [os.path.basename(os.path.dirname(p)) for p in image_paths]  # folder name = class label (works on Windows and Linux)
label_names = np.unique(all_label_names)  # deduplicate
label_to_index = dict((name, i) for i, name in enumerate(label_names))
print(label_to_index)  # name-to-id mapping, e.g. {'cat': 0, 'dog': 1, ...}
index_to_label = dict((v, k) for k, v in label_to_index.items())  # id-to-name mapping
all_labels = [label_to_index.get(name) for name in all_label_names]  # convert every label name to its integer id
np.random.seed()  # pass a fixed value here for a reproducible split
random_index = np.random.permutation(len(image_paths))  # random permutation of indices
image_paths = np.array(image_paths)[random_index]  # shuffle paths and labels with the same permutation
all_labels = np.array(all_labels)[random_index]
i = int(len(image_paths) * 0.8)  # 80% training set, 20% test set
train_path = image_paths[:i]
train_labels = all_labels[:i]
test_path = image_paths[i:]
test_labels = all_labels[i:]
train_ds = tf.data.Dataset.from_tensor_slices((train_path, train_labels))  # wrap in tf.data datasets
test_ds = tf.data.Dataset.from_tensor_slices((test_path, test_labels))

# load one image and preprocess it
def load_img(path, label):
    image = tf.io.read_file(path)
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.image.resize(image, [128, 128])
    image = tf.cast(image, tf.float32)
    image = image / 255  # scale pixels to [0, 1]
    return image, label

AUTOTUNE = tf.data.experimental.AUTOTUNE
train_ds = train_ds.map(load_img, num_parallel_calls=AUTOTUNE)
test_ds = test_ds.map(load_img, num_parallel_calls=AUTOTUNE)
BATCH_SIZE = 8
train_ds = train_ds.repeat().shuffle(200).batch(BATCH_SIZE)  # shuffle and batch the training data
test_ds = test_ds.batch(BATCH_SIZE)
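As a quick sanity check (this assumes TF 2.x-style eager execution; under graph mode you would need tf.compat.v1.data.make_one_shot_iterator instead), you can pull one batch and confirm its shape:

images, labels = next(iter(train_ds))  # fetch one batch from the pipeline
print(images.shape, labels.shape)      # expected: (8, 128, 128, 3) (8,)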
Build the model with the Sequential API: model = tf.keras.Sequential()
Input layer: model.add(tf.keras.layers.Conv2D(64, (3, 3), input_shape=(128, 128, 3), activation='relu'))
The main Conv2D parameters:
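Shown below with the values used in the input layer; strides and padding are the tf.keras defaults, spelled out here for clarity:

tf.keras.layers.Conv2D(
    filters=64,                 # number of convolution kernels = output channels
    kernel_size=(3, 3),         # spatial size of each kernel
    strides=(1, 1),             # sliding-window step (default 1)
    padding='valid',            # 'valid' = no padding (default); 'same' keeps H and W
    activation='relu',          # nonlinearity applied to the output
    input_shape=(128, 128, 3))  # only required on the first layer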
Add a layer: model.add(tf.keras.layers.BatchNormalization())
What BatchNormalization does:
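The layer normalizes each batch's activations (and tracks moving statistics for use at inference time), which stabilizes and speeds up the training of deep networks. Its key arguments, shown with their Keras defaults:

tf.keras.layers.BatchNormalization(
    axis=-1,        # normalize over the channel axis
    momentum=0.99,  # momentum of the moving mean/variance used at inference
    epsilon=0.001)  # small constant added to the variance for numerical stability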
Keep adding layers to deepen the network, and insert pooling layers along the way.
Pooling layer: model.add(tf.keras.layers.MaxPooling2D())
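With the default pool_size=(2, 2), MaxPooling2D keeps the strongest response in each 2x2 window, halving the feature map's height and width:

tf.keras.layers.MaxPooling2D(pool_size=(2, 2))  # (batch, H, W, C) -> (batch, H/2, W/2, C)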
Finally, add the output layer: model.add(tf.keras.layers.Dense(5, activation='softmax'))
Because this is a 5-class network, the Dense output has 5 units.
The main Dense parameters:
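The arguments used by the output layer, with use_bias spelled out at its default:

tf.keras.layers.Dense(
    units=5,               # output dimension: one unit per class
    activation='softmax',  # converts the 5 outputs into a probability distribution
    use_bias=True)         # learnable bias term (default)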
Before the classifier, add a global pooling layer: model.add(tf.keras.layers.GlobalAveragePooling2D())
What GlobalAveragePooling2D does:
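It collapses each feature map to its mean, turning (batch, H, W, C) into (batch, C); compared with Flatten followed by a large Dense layer, this sharply reduces the parameter count. A small shape demo, assuming eager execution:

x = tf.random.normal((8, 6, 6, 512))            # a fake convolutional feature map
y = tf.keras.layers.GlobalAveragePooling2D()(x)
print(y.shape)                                  # (8, 512)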
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(64, (3, 3), input_shape=(128, 128, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Conv2D(128, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Conv2D(128, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.MaxPooling2D())
model.add(tf.keras.layers.Conv2D(256, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Conv2D(256, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.MaxPooling2D())
model.add(tf.keras.layers.Conv2D(512, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Conv2D(512, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.MaxPooling2D())
model.add(tf.keras.layers.Conv2D(512, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Conv2D(512, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Conv2D(512, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.GlobalAveragePooling2D())
model.add(tf.keras.layers.Dense(1024, activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Dense(5, activation='softmax'))
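After assembling the layers it is worth printing the architecture to confirm output shapes and parameter counts:

model.summary()  # per-layer output shapes and parameter counts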
model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=['acc']
)
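SparseCategoricalCrossentropy is the right loss here because the labels are integer class ids (0-4) rather than one-hot vectors; CategoricalCrossentropy would require one-hot labels instead. A tiny illustration, assuming eager execution:

loss_fn = tf.keras.losses.SparseCategoricalCrossentropy()
# integer label 2 scored against a 5-way probability vector; no one-hot needed
print(loss_fn([2], [[0.05, 0.05, 0.8, 0.05, 0.05]]).numpy())  # ~0.223 = -log(0.8)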
train_count = len(train_path)
test_count = len(test_path)
step_per_epoch = train_count//BATCH_SIZE
validation_step = test_count//BATCH_SIZE
history = model.fit(train_ds, epochs=500, steps_per_epoch=step_per_epoch,
validation_data=test_ds,
validation_steps=validation_step
)
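Because train_ds calls repeat(), it yields batches indefinitely; steps_per_epoch = train_count // BATCH_SIZE is what tells Keras how many batches make up one epoch, and validation_steps plays the same role for the test set.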
Save the model:
model.save("save_path")  # placeholder path; point this at a real directory or .h5 file
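The saved model can be restored later with load_model ("save_path" is the same placeholder used above):

model = tf.keras.models.load_model("save_path")  # restores architecture, weights, and optimizer state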
The training curves plot the training-set loss (loss) against the test-set loss (val_loss), and the training-set accuracy (acc) against the test-set accuracy (val_acc).
At its best, the test accuracy reaches about 70%.
The complete script:

import tensorflow as tf
import numpy as np
import glob
import os
import matplotlib.pyplot as plt

tf.compat.v1.disable_eager_execution()  # version-compatibility workaround; remove it if no error is raised
os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # compute on GPU 0; must be set before TensorFlow touches the GPU
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    tf.config.experimental.set_virtual_device_configuration(
        gpus[0],
        [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4500)])  # cap GPU memory usage

image_paths = glob.glob('./data/animals/*/*.jpg')  # collect every image path
all_label_names = [os.path.basename(os.path.dirname(p)) for p in image_paths]  # folder name = class label
label_names = np.unique(all_label_names)  # deduplicate
label_to_index = dict((name, i) for i, name in enumerate(label_names))
print(label_to_index)  # name-to-id mapping
index_to_label = dict((v, k) for k, v in label_to_index.items())  # id-to-name mapping
all_labels = [label_to_index.get(name) for name in all_label_names]  # labels as integer ids
np.random.seed()
random_index = np.random.permutation(len(image_paths))
image_paths = np.array(image_paths)[random_index]  # shuffle paths and labels with the same permutation
all_labels = np.array(all_labels)[random_index]
i = int(len(image_paths) * 0.8)  # 80% training set, 20% test set
train_path = image_paths[:i]
train_labels = all_labels[:i]
test_path = image_paths[i:]
test_labels = all_labels[i:]
train_ds = tf.data.Dataset.from_tensor_slices((train_path, train_labels))
test_ds = tf.data.Dataset.from_tensor_slices((test_path, test_labels))

# load one image and preprocess it
def load_img(path, label):
    image = tf.io.read_file(path)
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.image.resize(image, [128, 128])
    image = tf.cast(image, tf.float32)
    image = image / 255
    return image, label

AUTOTUNE = tf.data.experimental.AUTOTUNE
train_ds = train_ds.map(load_img, num_parallel_calls=AUTOTUNE)
test_ds = test_ds.map(load_img, num_parallel_calls=AUTOTUNE)
BATCH_SIZE = 8
train_ds = train_ds.repeat().shuffle(200).batch(BATCH_SIZE)
test_ds = test_ds.batch(BATCH_SIZE)

# model
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(64, (3, 3), input_shape=(128, 128, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Conv2D(128, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Conv2D(128, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.MaxPooling2D())
model.add(tf.keras.layers.Conv2D(256, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Conv2D(256, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.MaxPooling2D())
model.add(tf.keras.layers.Conv2D(512, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Conv2D(512, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.MaxPooling2D())
model.add(tf.keras.layers.Conv2D(512, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Conv2D(512, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Conv2D(512, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.GlobalAveragePooling2D())
model.add(tf.keras.layers.Dense(1024, activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Dense(5, activation='softmax'))

model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics=['acc'])

train_count = len(train_path)
test_count = len(test_path)
step_per_epoch = train_count // BATCH_SIZE
validation_step = test_count // BATCH_SIZE
history = model.fit(train_ds, epochs=500, steps_per_epoch=step_per_epoch,
                    validation_data=test_ds,
                    validation_steps=validation_step)

plt.figure()
plt.plot(history.epoch, history.history.get('loss'), label='loss')
plt.plot(history.epoch, history.history.get('val_loss'), label='val_loss')
plt.legend()  # without this call the labels above are not displayed
plt.show()
plt.figure()
plt.plot(history.epoch, history.history.get('acc'), label='acc')
plt.plot(history.epoch, history.history.get('val_acc'), label='val_acc')
plt.legend()
plt.show()
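Once trained, the model can classify a single image by reusing the load_img preprocessing and the index_to_label mapping built earlier. A minimal sketch, assuming eager execution and a hypothetical path 'some_image.jpg':

img, _ = load_img('some_image.jpg', 0)        # hypothetical path; label argument is unused here
pred = model.predict(tf.expand_dims(img, 0))  # add a batch dimension: (1, 128, 128, 3)
print(index_to_label[int(np.argmax(pred))])   # map the predicted class id back to its name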
Reference:
Bilibili: [日月光华](https://space.bilibili.com/387143299?spm_id_from=333.788.b_765f7570696e666f.1)