PyTorch:

import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt

# Toy regression data: y = x^2 plus uniform noise
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)  # x data (tensor), shape=(100, 1)
y = x.pow(2) + 0.2 * torch.rand(x.size())               # y data (tensor), shape=(100, 1)


class Net(torch.nn.Module):
    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()  # call the parent class __init__
        # Define the form of each layer
        self.hidden = torch.nn.Linear(n_feature, n_hidden)   # hidden layer
        self.predict = torch.nn.Linear(n_hidden, n_output)   # output layer

    def forward(self, x):
        # Forward pass: map the input through the network to an output
        x = F.relu(self.hidden(x))  # activation on the hidden layer's linear output
        x = self.predict(x)         # output value
        return x


net = Net(n_feature=1, n_hidden=10, n_output=1)

print(net)

optimizer = torch.optim.SGD(net.parameters(), lr=0.2)  # pass in all of net's parameters and the learning rate
loss_func = torch.nn.MSELoss()                         # mean squared error between prediction and target

for t in range(100):
    prediction = net(x)              # forward pass on the input data

    loss = loss_func(prediction, y)  # compute the loss (prediction first, then target)

    optimizer.zero_grad()            # clear gradients from the previous step
    loss.backward()                  # backpropagate to compute gradients
    optimizer.step()                 # update the parameters
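
The matplotlib import above is otherwise unused; in the usual version of this regression example the fitted curve is plotted once training finishes. A minimal sketch of that step, assuming net, x, and y from the block above are still in scope (the labels and styling are illustrative):

with torch.no_grad():
    fitted = net(x)  # predictions on the training inputs, without gradient tracking

plt.scatter(x.numpy(), y.numpy(), s=10, label='noisy data')      # training samples
plt.plot(x.numpy(), fitted.numpy(), 'r-', lw=2, label='net(x)')  # learned curve
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.show()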

TensorFlow:
# -*- coding: utf-8 -*-
# TensorFlow 1.x graph-mode example: a 2-3-1 network trained with Adam
# on a simulated binary-classification data set.

import tensorflow as tf
from numpy.random import RandomState

batch_size = 8

# Define the network parameters
w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))

x = tf.placeholder(tf.float32, shape=(None, 2), name='x-input')
y_ = tf.placeholder(tf.float32, shape=(None, 1), name='y-input')

# Forward propagation
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)

# Define the loss function (cross entropy) and the training step
y = tf.sigmoid(y)
cross_entropy = -tf.reduce_mean(
    y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0))
    + (1 - y_) * tf.log(tf.clip_by_value(1 - y, 1e-10, 1.0))
)

train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)

# Generate a simulated data set: the label is 1 when x1 + x2 < 1
rdm = RandomState(1)
dataset_size = 128
X = rdm.rand(dataset_size, 2)
Y = [[int(x1 + x2 < 1)] for (x1, x2) in X]

with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)

    # Parameter values before training
    print(sess.run(w1))
    print(sess.run(w2))

    # Start training
    STEPS = 5000
    for i in range(STEPS):
        start = (i * batch_size) % dataset_size
        end = min(start + batch_size, dataset_size)

        sess.run(train_step,
                 feed_dict={x: X[start:end], y_: Y[start:end]})

        if i % 1000 == 0:
            total_cross_entropy = sess.run(
                cross_entropy, feed_dict={x: X, y_: Y})
            print("After %d training steps, cross entropy on all data is %g"
                  % (i, total_cross_entropy))

    # Parameter values after training
    print(sess.run(w1))
    print(sess.run(w2))
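
As a quick sanity check, the trained weights can be pulled out of the session and used to score the whole simulated data set. This is a minimal sketch, not part of the original example: it assumes the lines sit at the end of the with tf.Session() as sess: block above (so sess, w1, w2, X, and Y are still available), and the 0.5 decision threshold is an illustrative choice.

    # Hypothetical sanity check, placed at the end of the session block above.
    import numpy as np

    w1_val, w2_val = sess.run([w1, w2])         # fetch the trained parameter values
    logits = np.dot(np.dot(X, w1_val), w2_val)  # same forward pass, in NumPy
    probs = 1.0 / (1.0 + np.exp(-logits))       # sigmoid, matching tf.sigmoid(y)
    predictions = (probs > 0.5).astype(int)     # threshold at 0.5 (illustrative)
    accuracy = np.mean(predictions == np.array(Y))
    print("Training-set accuracy: %g" % accuracy)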
