from keras.datasets import mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# 1. The three key attributes of a tensor
print(train_images.ndim)   # number of axes: 3
print(train_images.shape)  # shape: (60000, 28, 28)
print(train_images.dtype)  # data type: uint8

# Display the image at index 4
digit = train_images[4]
import matplotlib.pyplot as plt
plt.imshow(digit, cmap=plt.cm.binary)
plt.show()
# 2. Tensor slicing
my_slice = train_images[10:100]  # select images 10 through 99 along the first axis (100 is excluded)
print(my_slice.shape)  # (90, 28, 28)
my_slice = train_images[10:100, :, :]        # equivalent
my_slice = train_images[10:100, 0:28, 0:28]  # equivalent

# 3. By convention, the first axis (axis 0) of a data tensor is the samples axis
batch = train_images[:128]  # one batch of the dataset, of size 128

# Real-world data tensors
'''
1. Vector data: 2D tensors of shape (samples, features)
2. Time-series or sequence data: 3D tensors of shape (samples, timesteps, features)
3. Images: 4D tensors of shape (samples, height, width, channels) or (samples, channels, height, width)
4. Video: 5D tensors of shape (samples, frames, height, width, channels) or (samples, frames, channels, height, width)
'''
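# Illustrative sketch (not in the original notes): dummy arrays with the layouts listed above,
# using made-up sizes purely to make each shape concrete.
import numpy as np
vector_data = np.zeros((1000, 20))            # 1000 samples, 20 features
timeseries_data = np.zeros((250, 390, 3))     # 250 samples, 390 timesteps, 3 features per step
image_data = np.zeros((128, 256, 256, 3))     # 128 RGB images, channels-last layout
video_data = np.zeros((4, 240, 144, 256, 3))  # 4 clips of 240 frames each
print(vector_data.ndim, timeseries_data.ndim, image_data.ndim, video_data.ndim)  # 2 3 4 5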
# 4. Tensor operations
# 4.1 Element-wise operations in NumPy
import numpy as np
x = np.array([1, 2, 3])
y = np.array([4, 5, -4])
z = x + y             # element-wise addition
print(z)              # [5 7 -1]
z = np.maximum(z, 0)  # element-wise relu
print(z)              # [5 7 0]

# 4.2 Broadcasting
x = np.random.random((64, 3, 32, 10))
y = np.random.random((32, 10))
z = np.maximum(x, y)  # y of shape (32, 10) is broadcast (virtually repeated) over the leading (64, 3) axes of x
# z is a tensor of shape (64, 3, 32, 10)
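# Illustrative sketch (not in the original notes): what broadcasting does conceptually,
# written out as explicit loops for a 2D matrix and a 1D vector.
def naive_add_matrix_and_vector(m, v):
    assert m.ndim == 2 and v.ndim == 1 and m.shape[1] == v.shape[0]
    m = m.copy()
    for i in range(m.shape[0]):
        for j in range(m.shape[1]):
            m[i, j] += v[j]  # the vector is reused for every row
    return m

a = np.random.random((4, 3))
b = np.random.random((3,))
print(np.allclose(naive_add_matrix_and_vector(a, b), a + b))  # True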
# 4.3 Tensor dot product (shown on fresh 2D matrices; the x, y above have incompatible shapes for dot)
x = np.random.random((32, 10))
y = np.random.random((10, 6))
assert x.shape[1] == y.shape[0]  # the inner dimensions must match
z = np.dot(x, y)                 # result has shape (32, 6)
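# Illustrative sketch (not in the original notes): the matrix dot product written as explicit loops.
def naive_matrix_dot(a, b):
    assert a.ndim == 2 and b.ndim == 2 and a.shape[1] == b.shape[0]
    out = np.zeros((a.shape[0], b.shape[1]))
    for i in range(a.shape[0]):
        for j in range(b.shape[1]):
            for k in range(a.shape[1]):
                out[i, j] += a[i, k] * b[k, j]  # row i of a dotted with column j of b
    return out

print(np.allclose(naive_matrix_dot(x, y), np.dot(x, y)))  # True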
# 4.4 Tensor reshaping
train_images = train_images.reshape((60000, 28 * 28))
x = np.zeros((300, 20))
x = np.transpose(x)  # transposition is a special kind of reshaping; x.shape is now (20, 300)
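# Illustrative sketch (not in the original notes): reshaping keeps the same elements, only rearranged.
x = np.array([[0., 1.],
              [2., 3.],
              [4., 5.]])
print(x.reshape((6, 1)).shape)  # (6, 1)
print(x.reshape((2, 3)).shape)  # (2, 3)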
# Network architecture
from keras import models
from keras import layers
network = models.Sequential()
network.add(layers.Dense(512, activation='relu', input_shape=(28 * 28,)))  # Dense is a fully connected layer; 512 is the number of output units of this layer
network.add(layers.Dense(10, activation='softmax'))  # output layer with 10 units, one per digit class
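# Optional check (not in the original notes): summary() prints each layer's output shape and parameter count.
network.summary()
# Dense(512): 28*28*512 + 512 = 401,920 parameters; Dense(10): 512*10 + 10 = 5,130 parameters.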
# Compile: specify the loss function, the optimizer, and the metrics to monitor
network.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
# Data preprocessing: flatten the images, cast to float32 and scale values into the [0, 1] range
train_images = train_images.reshape((60000, 28 * 28))
train_images = train_images.astype('float32') / 255
test_images = test_images.reshape((10000, 28 * 28))
test_images = test_images.astype('float32') / 255
# Prepare the labels: one-hot encode them
from keras.utils import to_categorical
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
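# Illustrative sketch (not in the original notes): to_categorical turns integer labels into one-hot vectors.
print(to_categorical([0, 2], num_classes=3))
# [[1. 0. 0.]
#  [0. 0. 1.]]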
# Fit the model (the loss and metrics defined at compile time are reported during training)
network.fit(train_images, train_labels, epochs=5, batch_size=128)
# Evaluate the model on the test set
test_loss, test_acc = network.evaluate(test_images, test_labels)
print('test_acc:', test_acc)
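# Illustrative sketch (not in the original notes): classifying a single test image with the trained network.
predictions = network.predict(test_images[:1])  # shape (1, 10): one probability per digit class
print(predictions.argmax(axis=1))               # index of the most likely class, i.e. the predicted digit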