This article mainly uses TensorFlow, NumPy, Matplotlib, and Jupyter Notebook for training.
1. Import the libraries
import numpy as np
import tensorflow as tf
2. Read NumPy arrays from the npz file
# Path to the npz file
filename = "./datas/mnist/mnist.npz"
with np.load(filename) as data:
    train_examples = data['x_train']
    train_labels = data['y_train']
    test_examples = data['x_test']
    test_labels = data['y_test']

print(type(train_examples), type(train_labels))
print(train_examples.ndim, train_labels.ndim)
print(train_examples.shape, train_labels.shape)
print(train_examples.dtype, train_labels.dtype)

train_examples[1]
train_examples[0].shape
train_labels[0]

import matplotlib.pyplot as plt
plt.imshow(train_examples[0])
plt.show()
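For reference, the standard mnist.npz split has 60,000 training images and 10,000 test images of size 28×28. A quick sanity check, assuming the file above follows that standard layout:
# Sanity check (assumes the standard MNIST split inside the npz file)
assert train_examples.shape == (60000, 28, 28)
assert train_labels.shape == (60000,)
assert test_examples.shape == (10000, 28, 28)
assert test_labels.shape == (10000,)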
3. Load the NumPy arrays into tf.data.Dataset
tf.data.Dataset.from_tensor_slices accepts a tuple of (feature matrix, label vector). The two must have the same number of rows (samples); the rows are then matched up and combined pairwise.
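To see the row-by-row pairing concretely, here is a minimal toy sketch (the arrays below are made up purely for illustration):
# Two samples with 3 features each, paired with two labels
features = np.array([[1, 2, 3], [4, 5, 6]])
labels = np.array([0, 1])
toy_ds = tf.data.Dataset.from_tensor_slices((features, labels))
for x, y in toy_ds:
    print(x.numpy(), y.numpy())   # [1 2 3] 0   then   [4 5 6] 1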
train_dataset=tf.data.Dataset.from_tensor_slices((train_examples,train_labels))
test_dataset=tf.data.Dataset.from_tensor_slices((test_examples,test_labels))
# Inspect one sample from the dataset (each element now contains the features and the label together)
train_dataset.as_numpy_iterator().next()
4. Shuffle and batch the dataset
BATCH_SIZE=64
SHUFFLE_BUFFER_SIZE=100
shuffle_ds=train_dataset.shuffle(SHUFFLE_BUFFER_SIZE)
train_dataset=shuffle_ds.batch(BATCH_SIZE)
test_dataset=test_dataset.batch(BATCH_SIZE)
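shuffle keeps a buffer of SHUFFLE_BUFFER_SIZE elements and draws each next sample from it, so a larger buffer gives a more thorough shuffle. After batch, every dataset element is a whole batch; a small sketch (not part of the original tutorial) to confirm the shapes:
# Take one batch: images should be (64, 28, 28) and labels (64,)
for images, labels in train_dataset.take(1):
    print(images.shape, labels.shape)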
5. Build and compile the model
# input_shape omits the first (sample) dimension: the data is (60000, 28, 28), so we pass only (28, 28)
# In other words, input_shape here equals train_examples.shape[1:]
first_layer=tf.keras.layers.Flatten(input_shape=(28,28))
# Build the model
model=tf.keras.Sequential([
first_layer,
tf.keras.layers.Dense(128,activation='relu'),
tf.keras.layers.Dense(10,activation='softmax')
])
# Compile the model
model.compile(optimizer=tf.keras.optimizers.RMSprop(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
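SparseCategoricalCrossentropy takes integer class labels directly, so the 0–9 labels from mnist.npz need no one-hot encoding. A toy check with made-up values:
# Illustrative values only: the loss is -log(probability assigned to the true class)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy()
y_true = [2]                                                    # integer label
y_pred = [[0.1, 0.1, 0.8, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]  # softmax output
print(loss_fn(y_true, y_pred).numpy())                          # ≈ 0.223 = -log(0.8)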
# Show the model summary
model.summary()
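The parameter counts reported by model.summary() can be verified by hand: a Dense layer has inputs × units weights plus units biases, so Dense(128) on the 784 flattened inputs has 784 × 128 + 128 = 100,480 parameters and Dense(10) has 128 × 10 + 10 = 1,290, for 101,770 in total.
# Hand check of the parameter counts shown by model.summary()
print(28 * 28 * 128 + 128)   # 100480 parameters in the first Dense layer
print(128 * 10 + 10)         # 1290 parameters in the output layer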
6. Train and evaluate
model.fit(train_dataset,epochs=10)
model.evaluate(test_dataset)
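After training, the model can also be used for prediction. A minimal sketch, not from the original article, that compares predictions on one test batch with the true labels:
# Predict on one batch of test images and compare with the true labels
for images, labels in test_dataset.take(1):
    probs = model.predict(images)            # shape (batch, 10), softmax outputs
    print(np.argmax(probs, axis=1)[:10])     # predicted digits
    print(labels.numpy()[:10])               # true digits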