Getting Started with TensorFlow

1 My First Demo

Let's start by running our first demo:

import tensorflow as tf
import numpy as np

# create data
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data*0.1+0.3

# create tensorflow structure start #
Weights = tf.Variable(tf.random_uniform([1],-1.0,1.0))
biases = tf.Variable(tf.zeros([1]))

y = Weights*x_data+biases

loss = tf.reduce_mean(tf.square(y-y_data))   #cost function
optimizer = tf.train.GradientDescentOptimizer(0.5)  # gradient descent optimizer; 0.5 is the learning rate
train = optimizer.minimize(loss)  # minimize the cost function

init = tf.global_variables_initializer()  # initialize all Variables defined above
# create tensorflow structure end #

sess = tf.Session()
sess.run(init)  # very important --- this activates (initializes) the neural network

for step in range(201):
    sess.run(train)
    if step % 20 == 0:
        print (step,sess.run(Weights),sess.run(biases))
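
After about 200 steps Weights should be close to 0.1 and biases close to 0.3, because the training data were generated from y = 0.1 * x + 0.3. As a small hedged sketch (same names as above), several tensors can also be fetched in a single sess.run call instead of three separate ones:

# hedged sketch: fetch Weights, biases and the loss in one call after training
w, b, l = sess.run([Weights, biases, loss])
print('final:', w, b, l)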

2 session

Nothing defined in TensorFlow (ops, variables, functions) actually executes until it is run through session.run.

# session tutorial
import tensorflow as tf

matrix1 = tf.constant([[3,3]])

matrix2 = tf.constant([[2],[2]])

product = tf.matmul(matrix1,matrix2)  # matrix multiplication, equivalent to np.dot(m1, m2)

#method 1
sess = tf.Session()
result = sess.run(product)
print (result)
sess.close()

# method 2: the with statement closes the session automatically
with tf.Session() as sess:
    result2 = sess.run(product)
    print (result2)
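
For these two constants the product is [[12]], the same result NumPy gives; a minimal sketch of that cross-check (assuming NumPy is available):

import numpy as np

m1 = np.array([[3, 3]])
m2 = np.array([[2], [2]])
print(np.dot(m1, m2))   # [[12]], matches sess.run(product)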

3 Variable

Variables must be declared with tf.Variable, and their definition is only completed by global_variables_initializer; the actual values are only created once that init op has been run in a session.

When is global_variables_initializer actually needed?

# variable tutorial



import tensorflow as tf

state = tf.Variable(0,name = 'counter')
#print (state.name)
one = tf.constant(1)

new_value = tf.add(state,one)  # new_value = state + 1
update = tf.assign(state,new_value)  # assign new_value back to state

init = tf.global_variables_initializer()  # required whenever Variables are defined

with tf.Session() as sess:
    sess.run(init)
    for _ in range(3):
        sess.run(update)
        print (sess.run(state))
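
To answer the question above: global_variables_initializer is needed whenever the graph contains a tf.Variable; trying to read a Variable that was never initialized raises an error. A minimal hedged sketch of that failure mode in TF 1.x (state2 is just an illustrative name):

# hedged sketch: reading an uninitialized Variable fails
state2 = tf.Variable(0, name='counter2')
with tf.Session() as sess:
    try:
        print(sess.run(state2))
    except tf.errors.FailedPreconditionError:
        print('run tf.global_variables_initializer() first')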

4 placeholder

A placeholder is only given its value at execution time, via feed_dict.

# placeholder
import  tensorflow as tf

input1 = tf.placeholder(tf.float32)
input2 = tf.placeholder(tf.float32)

output = tf.multiply(input1,input2)

with tf.Session() as sess:
    print (sess.run(output,feed_dict={input1:[7.],input2:[2.]}))
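
A placeholder can also carry a shape; writing None for one dimension leaves the batch size open, which is exactly how xs and ys are declared in the next section. A small hedged sketch:

# hedged sketch: placeholder with an open batch dimension
x = tf.placeholder(tf.float32, [None, 1])
double = x * 2

with tf.Session() as sess:
    print(sess.run(double, feed_dict={x: [[1.], [2.], [3.]]}))  # any number of rows works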

5 Defining a Neural Network Layer


# neural network layer
import  tensorflow as tf
import numpy as np

def add_layer(inputs,in_size,out_size,activation_function=None):  # with no activation_function the layer is purely linear
    Weights = tf.Variable(tf.random_normal([in_size,out_size]))
    biases = tf.Variable(tf.zeros([1,out_size])+0.1)

    Wx_plus_b = tf.matmul(inputs,Weights)+biases
    if activation_function is None :
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)

    return  outputs

def neural_network():
    # generate training data
    x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
    noise = np.random.normal(0, 0.05, x_data.shape)
    y_data = np.square(x_data) - 0.5 + noise

    # define the two layers: one hidden layer and one output layer
    xs = tf.placeholder(tf.float32, [None, 1])  # None leaves the number of samples unspecified
    ys = tf.placeholder(tf.float32, [None, 1])
    l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
    prediction = add_layer(l1, 10, 1, activation_function=None)

    # define the loss and the gradient descent training step
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)


    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    for i in range(1000):
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
        if i % 50 == 0:
            print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))


# x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
# init = tf.global_variables_initializer()
# sess = tf.Session()
# try:
#     print(sess.run(tf.convert_to_tensor(x_data)))
# except Exception as e:
#     print(e)

if __name__ == '__main__':
    neural_network()
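
After training, the same feed_dict mechanism reads predictions back out of the network. A minimal hedged sketch, assuming it is added inside neural_network() right after the training loop (so sess, prediction, xs and x_data are still in scope):

    # hedged sketch: fetch the fitted values from the trained network
    pred = sess.run(prediction, feed_dict={xs: x_data})
    print(pred[:5])   # should lie close to x_data**2 - 0.5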