import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

# Load the iris data set from a local CSV file
dataset = pd.read_csv("./data/datalist/iris.csv")

import seaborn as sns

# Pairwise scatter plots of the four features, colored by species
sns.pairplot(dataset.iloc[:, 1:6], hue="Species")
# Columns 1-4 are the features, column 5 is the species label
X = dataset.iloc[:, 1:5].values
y = dataset.iloc[:, 5].values

from sklearn.preprocessing import LabelEncoder

# Encode the string labels as the integers 0, 1, 2
encoder = LabelEncoder()
y1 = encoder.fit_transform(y)
print("处理后的数据集\n", y1[:5])
# 将y1转换为神经网络需要使用的数组结构
Y = pd.get_dummies(y1).values
print("处理后的数据集\n", Y[:5])
Encoded labels
[0 0 0 0 0]
One-hot labels
[[1 0 0]
[1 0 0]
[1 0 0]
[1 0 0]
[1 0 0]]
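pd.get_dummies is just one way to one-hot encode the integer labels. An equivalent route (a sketch, not used in the original) is Keras's own helper; encoder.classes_ also recovers the original species names for each integer code:

from tensorflow.keras.utils import to_categorical

# Hypothetical alternative: produces the same one-hot matrix as pd.get_dummies(y1).values
Y_alt = to_categorical(y1, num_classes=3)
# The LabelEncoder remembers which species each integer stands for
print(encoder.classes_)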
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
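train_test_split draws a random 80/20 split here. Because the iris classes are balanced this is usually fine, but a stratified variant (an optional sketch, not part of the original) guarantees equal class proportions in both parts:

# Optional: stratify on the integer labels so each split keeps the one-third-per-class ratio
X_train, X_test, Y_train, Y_test = train_test_split(
    X, Y, test_size=0.2, random_state=0, stratify=y1)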
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
model = Sequential()
# Input layer (4 features in, 4 units)
model.add(Dense(4, input_shape=(4,), activation="relu"))
# First hidden layer (8 units)
model.add(Dense(8, activation="relu"))
# Second hidden layer (6 units)
model.add(Dense(6, activation="relu"))
# Output layer (3 classes, softmax probabilities)
model.add(Dense(3, activation="softmax"))
model.compile(
    # Optimizer with an explicit learning rate
    optimizer=Adam(learning_rate=0.04),
    # Loss function for one-hot multi-class targets
    loss="categorical_crossentropy",
    # Report accuracy during training
    metrics=["accuracy"],
)
model.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense (Dense) (None, 4) 20 _________________________________________________________________ dense_1 (Dense) (None, 8) 40 _________________________________________________________________ dense_2 (Dense) (None, 6) 54 _________________________________________________________________ dense_3 (Dense) (None, 3) 21 ================================================================= Total params: 135 Trainable params: 135 Non-trainable params: 0 _________________________________________________________________
history = model.fit(X_train, Y_train, epochs=500)
Epoch 1/500
4/4 [==============================] - 1s 5ms/step - loss: 1.4580 - accuracy: 0.3833
Epoch 2/500
4/4 [==============================] - 0s 5ms/step - loss: 1.0303 - accuracy: 0.2000
Epoch 3/500
4/4 [==============================] - 0s 5ms/step - loss: 0.9005 - accuracy: 0.5083
Epoch 4/500
4/4 [==============================] - 0s 4ms/step - loss: 0.7248 - accuracy: 0.6917
Epoch 5/500
4/4 [==============================] - 0s 6ms/step - loss: 0.5244 - accuracy: 0.7333
Epoch 6/500
4/4 [==============================] - 0s 6ms/step - loss: 0.3771 - accuracy: 0.8750
Epoch 7/500
4/4 [==============================] - 0s 5ms/step - loss: 0.2725 - accuracy: 0.9583
Epoch 8/500
4/4 [==============================] - 0s 6ms/step - loss: 0.2024 - accuracy: 0.9583
Epoch 9/500
4/4 [==============================] - 0s 5ms/step - loss: 0.2492 - accuracy: 0.8917
Epoch 10/500
4/4 [==============================] - 0s 5ms/step - loss: 0.1896 - accuracy: 0.9250
Epoch 11/500
4/4 [==============================] - 0s 6ms/step - loss: 0.1430 - accuracy: 0.9417
Epoch 12/500
4/4 [==============================] - 0s 5ms/step - loss: 0.1202 - accuracy: 0.9500
Epoch 13/500
4/4 [==============================] - 0s 5ms/step - loss: 0.1458 - accuracy: 0.9417
Epoch 14/500
4/4 [==============================] - 0s 4ms/step - loss: 0.1651 - accuracy: 0.9417
Epoch 15/500
4/4 [==============================] - 0s 5ms/step - loss: 0.1096 - accuracy: 0.9583
Epoch 16/500
4/4 [==============================] - 0s 4ms/step - loss: 0.1139 - accuracy: 0.9583
Epoch 17/500
4/4 [==============================] - 0s 4ms/step - loss: 0.0925 - accuracy: 0.9667
Epoch 18/500
4/4 [==============================] - 0s 4ms/step - loss: 0.0837 - accuracy: 0.9583
Epoch 19/500
4/4 [==============================] - 0s 12ms/step - loss: 0.0887 - accuracy: 0.9667
Epoch 20/500
4/4 [==============================] - 0s 5ms/step - loss: 0.1162 - accuracy: 0.9583
Epoch 21/500
4/4 [==============================] - 0s 4ms/step - loss: 0.1475 - accuracy: 0.9333
Epoch 22/500
4/4 [==============================] - 0s 4ms/step - loss: 0.2753 - accuracy: 0.9250
Epoch 23/500
4/4 [==============================] - 0s 5ms/step - loss: 0.1395 - accuracy: 0.9250
Epoch 24/500
4/4 [==============================] - 0s 4ms/step - loss: 0.1020 - accuracy: 0.9583
Epoch 25/500
4/4 [==============================] - 0s 4ms/step - loss: 0.1085 - accuracy: 0.9583
Epoch 26/500
4/4 [==============================] - 0s 4ms/step - loss: 0.0826 - accuracy: 0.9667
Epoch 27/500
4/4 [==============================] - 0s 4ms/step - loss: 0.0724 - accuracy: 0.9750
Epoch 28/500
4/4 [==============================] - 0s 4ms/step - loss: 0.0723 - accuracy: 0.9750
Epoch 29/500
4/4 [==============================] - 0s 4ms/step - loss: 0.0818 - accuracy: 0.9667
Epoch 30/500
4/4 [==============================] - 0s 4ms/step - loss: 0.0802 - accuracy: 0.9750
Epoch 31/500
4/4 [==============================] - 0s 4ms/step - loss: 0.1195 - accuracy: 0.9583
Epoch 32/500
4/4 [==============================] - 0s 5ms/step - loss: 0.0833 - accuracy: 0.9667
Epoch 33/500
4/4 [==============================] - 0s 4ms/step - loss: 0.0998 - accuracy: 0.9583
Epoch 34/500
…
4/4 [==============================] - 0s 5ms/step - loss: 0.0693 - accuracy: 0.9750
Epoch 499/500
4/4 [==============================] - 0s 5ms/step - loss: 0.0524 - accuracy: 0.9750
Epoch 500/500
4/4 [==============================] - 0s 11ms/step - loss: 0.0916 - accuracy: 0.9500
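The fit() call above tracked only training metrics. An optional variant (hypothetical, not run in this tutorial) passes the test set as validation data so that every epoch also records test loss and accuracy:

# Sketch: record validation metrics per epoch as well
history_val = model.fit(X_train, Y_train, epochs=500,
                        validation_data=(X_test, Y_test), verbose=0)
# history_val.history then also contains "val_loss" and "val_accuracy"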
# Convert the training history to a DataFrame for inspection and plotting
hist = pd.DataFrame(history.history)
hist.head()
|   | loss     | accuracy |
|---|----------|----------|
| 0 | 1.457973 | 0.383333 |
| 1 | 1.030313 | 0.200000 |
| 2 | 0.900486 | 0.508333 |
| 3 | 0.724796 | 0.691667 |
| 4 | 0.524401 | 0.733333 |
# Plot the training loss per epoch
plt.figure(figsize=(6, 3))
plt.plot(hist.index, hist["loss"])
plt.show()
# Plot the training accuracy per epoch
plt.figure(figsize=(6, 3))
plt.plot(hist.index, hist["accuracy"])
plt.show()
# Predict class probabilities on the test set; argmax gives the predicted class
y_pred = model.predict(X_test)
y_pred_class = np.argmax(y_pred, axis=1)
Y_test_class = np.argmax(Y_test, axis=1)

# Count the correctly classified test samples
count = 0
for i in range(len(y_pred_class)):
    if y_pred_class[i] == Y_test_class[i]:
        count = count + 1
print("Accuracy:", count / len(Y_test))
Accuracy: 1.0
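The counting loop can be replaced by a one-line vectorized comparison, and Keras can also score the test set directly; a brief sketch, equivalent to the loop above:

# Vectorized accuracy: mean of element-wise matches
print("Accuracy:", np.mean(y_pred_class == Y_test_class))
# Or let Keras compute loss and accuracy on the test set in one call
loss, acc = model.evaluate(X_test, Y_test, verbose=0)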
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf

# 1. Data acquisition
# Download the training set
TRAIN_URL = "http://download.tensorflow.org/data/iris_training.csv"
train_path = tf.keras.utils.get_file(TRAIN_URL.split('/')[-1], TRAIN_URL)
# Download the test set
TEST_URL = "http://download.tensorflow.org/data/iris_test.csv"
test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)

# Read the data with pandas, producing DataFrame objects
df_iris_train = pd.read_csv(train_path, header=0)
df_iris_test = pd.read_csv(test_path, header=0)

# Convert to numpy arrays
iris_train = np.array(df_iris_train)
iris_test = np.array(df_iris_test)

# 2. Preprocessing
# 2.1 Split features and labels
x_train = iris_train[:, 0:4]
y_train = iris_train[:, 4]
x_test = iris_test[:, 0:4]
y_test = iris_test[:, 4]

# 2.2 Normalization
# The four features share the same scale, so no normalization is needed.

# 2.3 Centering (column-wise, hence axis=0)
x_train = x_train - np.mean(x_train, axis=0)
x_test = x_test - np.mean(x_test, axis=0)

# 2.4 Type conversion
# Features to float32
X_train = tf.cast(x_train, tf.float32)
X_test = tf.cast(x_test, tf.float32)
# Labels to int32, then to one-hot encoding
y_train = y_train.astype(np.int32)
y_test = y_test.astype(np.int32)
Y_train = tf.one_hot(y_train, 3)
Y_test = tf.one_hot(y_test, 3)

# 3. Hyperparameters
learn_rate = 0.5    # learning rate
iter = 50           # number of iterations
display_step = 10   # print frequency

# Model parameters: one hidden layer with 16 units
np.random.seed(612)
W1 = tf.Variable(np.random.randn(4, 16), dtype=tf.float32)
B1 = tf.Variable(np.zeros([16]), dtype=tf.float32)
W2 = tf.Variable(np.random.randn(16, 3), dtype=tf.float32)
B2 = tf.Variable(np.zeros([3]), dtype=tf.float32)

# 4. Training
acc_train = []   # training accuracy per iteration
acc_test = []    # test accuracy per iteration
cce_train = []   # training cross-entropy loss
cce_test = []    # test cross-entropy loss

for i in range(0, iter + 1):
    with tf.GradientTape() as tape:
        # Hidden layer on the training set: linear part, then ReLU
        hidden_train = tf.matmul(X_train, W1) + B1
        Hidden_train = tf.nn.relu(hidden_train)
        # Output layer: linear part, then softmax probabilities
        pred_train = tf.matmul(Hidden_train, W2) + B2
        PRED_train = tf.nn.softmax(pred_train)
        # Training cross-entropy loss
        Loss_train = tf.reduce_mean(
            tf.keras.metrics.categorical_crossentropy(y_true=Y_train, y_pred=PRED_train))
        # Forward pass and loss on the test set
        Hidden_test = tf.nn.relu(tf.matmul(X_test, W1) + B1)
        PRED_test = tf.nn.softmax(tf.matmul(Hidden_test, W2) + B2)
        Loss_test = tf.reduce_mean(
            tf.keras.metrics.categorical_crossentropy(y_true=Y_test, y_pred=PRED_test))

    # Record the losses
    cce_train.append(Loss_train)
    cce_test.append(Loss_test)

    # Accuracy: fraction of samples whose argmax matches the integer label
    accuracy_train = tf.reduce_mean(tf.cast(tf.equal(
        tf.argmax(PRED_train, axis=1, output_type=tf.int32), y_train), tf.float32))
    accuracy_test = tf.reduce_mean(tf.cast(tf.equal(
        tf.argmax(PRED_test, axis=1, output_type=tf.int32), y_test), tf.float32))
    acc_train.append(accuracy_train)
    acc_test.append(accuracy_test)

    # Gradients of the training loss w.r.t. all parameters
    grads = tape.gradient(Loss_train, [W1, B1, W2, B2])
    # Gradient-descent updates
    W1.assign_sub(learn_rate * grads[0])
    B1.assign_sub(learn_rate * grads[1])
    W2.assign_sub(learn_rate * grads[2])
    B2.assign_sub(learn_rate * grads[3])

    if i % display_step == 0:
        print(i, ", train accuracy:", accuracy_train.numpy(),
              ", train loss:", Loss_train.numpy(),
              ", test accuracy:", accuracy_test.numpy(),
              ", test loss:", Loss_test.numpy())

# 5. Visualization
plt.figure(figsize=(10, 3))

# 5.1 Losses
plt.subplot(121)
plt.plot(cce_train, color="blue", label="train")
plt.plot(cce_test, color="red", label="test")
plt.xlabel("Iter")
plt.ylabel("cce")
plt.legend(["train", "test"])

# 5.2 Accuracies
plt.subplot(122)
plt.plot(acc_train, color="blue", label="train")
plt.plot(acc_test, color="red", label="test")
plt.xlabel("Iter")
plt.ylabel("acc")
plt.legend(["train", "test"])

plt.show()
0 , train accuracy: 0.43333334 , train loss: 2.2056413 , test accuracy: 0.4 , test loss: 1.721138
10 , train accuracy: 0.94166666 , train loss: 0.20531389 , test accuracy: 0.96666664 , test loss: 0.24966088
20 , train accuracy: 0.95 , train loss: 0.14953992 , test accuracy: 1.0 , test loss: 0.16710347
30 , train accuracy: 0.9583333 , train loss: 0.12234607 , test accuracy: 1.0 , test loss: 0.12469266
40 , train accuracy: 0.9583333 , train loss: 0.10509937 , test accuracy: 1.0 , test loss: 0.09986884
50 , train accuracy: 0.9583333 , train loss: 0.092933945 , test accuracy: 1.0 , test loss: 0.08488502
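As a side note, the manual assign_sub updates in the loop above can equivalently be expressed with a built-in optimizer; a minimal sketch, assuming the same W1, B1, W2, B2 variables and the Loss_train/tape from the training loop:

# Equivalent update step using a built-in optimizer instead of manual assign_sub
optimizer = tf.keras.optimizers.SGD(learning_rate=learn_rate)
grads = tape.gradient(Loss_train, [W1, B1, W2, B2])
optimizer.apply_gradients(zip(grads, [W1, B1, W2, B2]))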