Backpropagation
3. Loss function: measures the gap between the network's prediction y and the label y_; here mean squared error is used: loss=tf.reduce_mean(tf.square(y_-y))
4. Backpropagation training method: optimizes the parameters with the goal of reducing the loss value
5. Learning rate: determines the magnitude of each parameter update (see the sketch after this list)
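To make items 3-5 concrete, here is a minimal NumPy sketch (an illustration, not code from this post; the function name mse_step and its parameters are made up for the example) of a single gradient-descent update on the mean-squared-error loss. The learning rate lr scales how far the weights move along the negative gradient.

import numpy as np

def mse_step(w, x_batch, y_batch, lr=0.001):
    # Forward pass of a linear model: y = x @ w
    y_pred = x_batch @ w
    # Mean squared error between prediction and label
    loss = np.mean((y_pred - y_batch) ** 2)
    # Gradient of the loss with respect to w
    grad = 2.0 * x_batch.T @ (y_pred - y_batch) / len(x_batch)
    # A larger lr means a larger update per step
    return w - lr * grad, loss

Repeating this update over batches is what GradientDescentOptimizer(0.001).minimize(loss) automates in the code below.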
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

tf.compat.v1.disable_eager_execution()  # needed in TF 2.x so the v1 placeholder/Session API below works

SIZE = 8  # number of samples fed to the network per batch
seed = 23455
# Generate random numbers based on the seed
rng = np.random.RandomState(seed)
# A random 32x2 matrix (volume and weight) serves as the dataset (features)
data = rng.rand(32, 2)
# Build the labels: 1 means qualified, 0 means not qualified
data_lab = [[int(x1 + x2 < 1)] for (x1, x2) in data]
for j in range(len(data)):
    print("%d. data:%s , data_lab:%s" % (j + 1, data[j], data_lab[j]))

# Define the network's input, parameters and output; define the forward pass
x = tf.compat.v1.placeholder(tf.float32, shape=(None, 2))   # input: two features, unknown number of samples
y_ = tf.compat.v1.placeholder(tf.float32, shape=(None, 1))  # label: qualified or not
print(x, y_)
w1 = tf.Variable(tf.random.normal([2, 3], stddev=1, seed=1))
w2 = tf.Variable(tf.random.normal([3, 1], stddev=1, seed=1))

# Forward propagation, implemented as matrix multiplications
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)

# Define the loss function and the backpropagation method
loss = tf.reduce_mean(tf.square(y - y_))  # mean squared error as the loss
train_step = tf.compat.v1.train.GradientDescentOptimizer(0.001).minimize(loss)  # gradient descent, learning rate 0.001
# train_step = tf.compat.v1.train.MomentumOptimizer(0.001, 0.9).minimize(loss)
# train_step = tf.compat.v1.train.AdamOptimizer(0.001).minimize(loss)

# Create a session and train for steps=3000 rounds
lossData = []  # loss on the whole dataset after each step, for plotting
with tf.compat.v1.Session() as sess:
    init_op = tf.compat.v1.global_variables_initializer()  # initialize the parameters before training
    sess.run(init_op)
    # Print the current (untrained) parameter values
    print("w1 before training:\n", sess.run(w1))
    print("w2 before training:\n", sess.run(w2))
    steps = 3000
    for i in range(steps):
        start = (i * SIZE) % 32
        end = start + SIZE
        sess.run(train_step, feed_dict={x: data[start:end], y_: data_lab[start:end]})  # feed the data in batches
        total_loss = sess.run(loss, feed_dict={x: data, y_: data_lab})
        lossData.append(total_loss)
        if i % 500 == 0:
            print("After %d training step(s), loss on all data is %g" % (i, total_loss))
    print('\n')
    print("w1 after training:\n", sess.run(w1))
    print("w2 after training:\n", sess.run(w2))

plt.plot(lossData)
plt.show()
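One detail worth stepping through is the batch indexing: start = (i * SIZE) % 32 slides an 8-row window over the 32-row dataset and wraps back to the start, so the whole dataset is revisited every 4 steps. A standalone sketch of the index arithmetic (the numbers 32 and 8 are taken from the code above):

# Sketch of the batch-window arithmetic used above (32 samples, SIZE=8)
for i in range(6):
    start = (i * 8) % 32
    end = start + 8
    print("step %d feeds rows [%d:%d)" % (i, start, end))
# Steps 0-3 cover rows [0:8), [8:16), [16:24), [24:32); step 4 wraps back to [0:8)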
How the loss changes over the course of the 3000 training rounds: