TensorFlow works well for building neural networks for image-recognition tasks. Here we use the MNIST handwritten-digit set as the data source and try out the classic deep-learning CNN model.
(1) Import the MNIST dataset; images come as [batch_size, 784] arrays and labels as [batch_size, 10] one-hot arrays.
(2) Set up the X and Y placeholders.
(3) Build the convolution / pooling / activation network (see the shape check right after this list).
(4) Train the model on the training data, collect the loss and accuracy curves, and evaluate the model.
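A quick shape check for step (3): with SAME padding the 5x5 convolutions preserve spatial size, and each 2x2 max-pool halves it, so the feature maps go 28x28 -> 14x14 -> 7x7 across the two conv/pool blocks. With 64 maps after the second block, the flattened width fed to the first fully connected layer is 7*7*64 = 3136, which matches the [7 * 7 * 64, 1024] weight shape in the code below. A standalone sketch of that arithmetic:

size = 28
for _ in range(2):               # two blocks of SAME conv (size-preserving) + 2x2 max-pool
    size //= 2                   # each pool halves height and width
print(size, size * size * 64)    # 7, 3136 == 7*7*64, the FC input width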
import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np

# load MNIST: images are [batch_size, 784] arrays, labels are [batch_size, 10] one-hot arrays
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
batch_size = 100
X_holder = tf.placeholder(tf.float32, shape=[None, 784])
y_holder = tf.placeholder(tf.float32, shape=[None, 10])
images, labels = mnist.train.next_batch(batch_size)
print("image shape:%s, labels shape:%s" % (images.shape, labels.shape))

# initialize weights with a little noise to break symmetry and avoid zero
# gradients, i.e. neurons whose output is stuck at 0 (dead neurons)
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# first convolutional layer: 32 kernels, each attending to one feature
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(X_holder, [-1, 28, 28, 1])  # restore each 784-dim vector to a 28x28 image; -1 infers the batch size
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)

# second convolutional layer
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)

# fully connected layer
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

# dropout: keep_prob is the fraction of neurons kept; 0.5 during training, 1.0 at test time
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# map the 1024-dim vector to 10 dims, one per class
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
predict_y = tf.nn.softmax(y_conv)  # class probabilities, used by batchPredict below

# cross-entropy loss
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_holder, logits=y_conv))

# training step
train = tf.train.AdamOptimizer(1e-4).minimize(loss)

# accuracy metric
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_holder, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

session = tf.Session()
init = tf.global_variables_initializer()
session.run(init)

iterations = 1000

def trainme():
    steps = np.zeros(iterations)
    LOSS = np.zeros_like(steps)
    ACC = np.zeros_like(steps)
    for step in range(iterations):
        train_X, train_Y = mnist.train.next_batch(batch_size)
        _, loss_value, accuracy_value = session.run(
            [train, loss, accuracy],
            feed_dict={X_holder: train_X, y_holder: train_Y, keep_prob: 0.5})
        steps[step] = step
        LOSS[step] = loss_value
        ACC[step] = accuracy_value
        if step % 25 == 0:
            print('step:%d accuracy:%.4f, loss:%s' % (step, accuracy_value, loss_value))
    # plot the loss and accuracy curves
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(steps, LOSS, label='loss')
    ax.plot(steps, ACC, label='accuracy')
    ax.set_xlabel('step')
    fig.suptitle('training loss / accuracy')
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles, labels=labels)
    plt.show()

def batchPredict(batch_size):
    # print the test samples the model gets wrong, with their predicted probabilities
    test_X, test_Y = mnist.test.next_batch(batch_size)
    predict_labels = session.run(predict_y,
                                 feed_dict={X_holder: test_X, keep_prob: 1.0})
    for index in range(test_X.shape[0]):
        actual = np.argmax(test_Y[index])
        predict = np.argmax(predict_labels[index])
        if actual != predict:
            print('actual:%d ,predict:%d' % (actual, predict))
            print(predict_labels[index])

trainme()
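Once trainme() has run, the same session can also be scored on the held-out test set. The helper below is a minimal sketch of one way to do this, not part of the original script: the name evaluate_test and the 1000-image evaluation batch are my own choices, and keep_prob is fed as 1.0 so dropout is disabled at evaluation time.

def evaluate_test(eval_batch=1000):
    # average per-batch accuracy over the full 10000-image test set
    n_batches = mnist.test.num_examples // eval_batch
    total_acc = 0.0
    for _ in range(n_batches):
        test_X, test_Y = mnist.test.next_batch(eval_batch)
        total_acc += session.run(accuracy,
                                 feed_dict={X_holder: test_X,
                                            y_holder: test_Y,
                                            keep_prob: 1.0})  # no dropout when evaluating
    print('test accuracy: %.4f' % (total_acc / n_batches))

evaluate_test()

Averaging the per-batch accuracies is exact here because every evaluation batch has the same size.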