TensorFlow Framework Hands-On Learning (1)

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt


# training data: y = 2x plus Gaussian noise
train_X = np.linspace(-1, 1, 1000)
train_Y = 2 * train_X + np.random.randn(*train_X.shape) * 0.3

plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.legend()
#plt.show()

X = tf.placeholder("float")
Y = tf.placeholder("float")

W = tf.Variable(tf.random_normal([1]), name="weight")
b = tf.Variable(tf.zeros([1]), name="bias")

# forward model: z = X * W + b
z = tf.multiply(X, W) + b

# evaluate the result and feed it back via gradient descent
cost = tf.reduce_mean(tf.square(Y - z))
learning_rate = 0.01
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)


init = tf.global_variables_initializer()

training_epochs = 30
display_step = 2

# session

with tf.Session() as sess:
    sess.run(init)
    plotdata = {"batchsize": [], "loss": []}
    for epoch in range(training_epochs):
        for (x, y) in zip(train_X, train_Y):
            sess.run(optimizer, feed_dict={X: x, Y: y})

        if epoch % display_step == 0:
            loss = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
            print("Epoch:", epoch + 1, "cost=", loss, "W=", sess.run(W), "b=", sess.run(b))
            if not np.isnan(loss):  # only log finite losses
                plotdata["batchsize"].append(epoch)
                plotdata["loss"].append(loss)

    print("Finished!")
    print("cost =", sess.run(cost, feed_dict={X: train_X, Y: train_Y}), "W =", sess.run(W), "b =", sess.run(b))

This code learns from data generated by y ≈ 2x, fitting an appropriate weight w and bias b.

 

Below are notes on what each function does, to help me learn and remember these Python libraries and functions:

1. First, the linspace function: numpy.linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None)

  Returns num evenly spaced samples over the interval [start, stop]. If endpoint is True, stop is included as the last sample; if False, it is excluded. If retstep is True, the call returns (samples, step), where step is the spacing between consecutive samples.
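
  A quick sketch of what these flags do:

import numpy as np

# five evenly spaced samples over [0, 1], endpoint included (the default)
print(np.linspace(0, 1, 5))                  # [0.   0.25 0.5  0.75 1.  ]
# excluding the endpoint changes the spacing
print(np.linspace(0, 1, 5, endpoint=False))  # [0.  0.2 0.4 0.6 0.8]
# retstep=True also returns the step size
samples, step = np.linspace(0, 1, 5, retstep=True)
print(step)                                  # 0.25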

2. np.random.randn: numpy.random.randn(d0, d1, …, dn)

  Generates an array of random values with shape (d0, d1, …, dn), drawn from the standard normal distribution.

  By contrast, np.random.rand(d0, d1, …, dn) draws values uniformly from [0, 1).
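
  A small sketch contrasting the two, including the * unpacking used in the code above:

import numpy as np

print(np.random.randn(2, 3))  # shape (2, 3), samples from N(0, 1)
print(np.random.rand(2, 3))   # shape (2, 3), samples uniform on [0, 1)

# np.random.randn(*train_X.shape) unpacks the shape tuple into arguments,
# producing noise with the same shape as train_X
train_X = np.linspace(-1, 1, 5)
noise = np.random.randn(*train_X.shape) * 0.3
print(noise.shape)            # (5,)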

3. tf.placeholder: tf.placeholder(dtype, shape=None, name=None)

  A placeholder can be thought of as a variable that is only assigned a value when needed, i.e. at run time via feed_dict.

  dtype is the data type, e.g. tf.float32 or tf.float64. shape is the array shape: shape=None accepts a tensor of any shape, shape=[3, 4] fixes both dimensions, and shape=[None, 4] leaves the number of rows undetermined. Returns a Tensor.
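
  A minimal example of feeding a placeholder at run time:

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 4], name="x")  # any number of rows, 4 columns
row_sums = tf.reduce_sum(x, axis=1)

with tf.Session() as sess:
    # the placeholder only receives its value here, through feed_dict
    print(sess.run(row_sums, feed_dict={x: [[1, 2, 3, 4], [5, 6, 7, 8]]}))  # [10. 26.]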

4. tf.Variable.__init__()

  tf.Variable.__init__(initial_value, trainable=True, collections=None, validate_shape=True, name=None)

  Parameter list, quoting the related references:

  initial_value: any type convertible to a Tensor; the variable's initial value
  trainable: bool; if True, the variable is added to GraphKeys.TRAINABLE_VARIABLES, which is required for an Optimizer to update it
  collections: list; the graph collections the variable belongs to, default [GraphKeys.GLOBAL_VARIABLES]
  validate_shape: bool; if False, type and shape checking is skipped
  name: string; the variable's name; if unspecified, a unique one is assigned automatically
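
  A short sketch of trainable in action, using tf.trainable_variables() to inspect the collection:

import tensorflow as tf

# trainable=True (the default) adds W to GraphKeys.TRAINABLE_VARIABLES
W = tf.Variable(tf.random_normal([1]), name="weight")
# a non-trainable variable, e.g. a counter the optimizer should leave alone
step = tf.Variable(0, trainable=False, name="step")

print(tf.trainable_variables())  # contains weight but not step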

5. tf.random_normal(): tf.random_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)

  The code above only uses shape=[1], i.e. a one-element 1-D tensor.
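
  Other shapes and distribution parameters work the same way, for example:

import tensorflow as tf

r = tf.random_normal([2, 3], mean=0.0, stddev=0.5, seed=42)  # 2x3 samples from N(0, 0.25)

with tf.Session() as sess:
    print(sess.run(r))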

6. tf.reduce_mean(input_tensor, reduction_indices=None, keep_dims=False, name=None) computes a mean.

  input_tensor: the tensor to reduce.

  reduction_indices: the dimension along which to take the mean; if None, all dimensions are reduced and a single scalar is returned.
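
  A small example of reducing along different dimensions:

import tensorflow as tf

x = tf.constant([[1., 2.], [3., 4.]])

with tf.Session() as sess:
    print(sess.run(tf.reduce_mean(x)))                       # 2.5, mean over all elements
    print(sess.run(tf.reduce_mean(x, reduction_indices=0)))  # [2. 3.], column means
    print(sess.run(tf.reduce_mean(x, reduction_indices=1)))  # [1.5 3.5], row means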

7. tf.train.GradientDescentOptimizer(learning_rate)

  Creates an optimizer with the given learning rate: optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)

  To compute the gradients and apply them to the variables in one step: optimizer.minimize(cost, var_list=<list of variables>). The returned op is then executed with sess.run() inside a session.

  To compute the gradients by themselves: gradients = optimizer.compute_gradients(loss, <list of variables>)

  The gradients can then be processed however you need before being applied with optimizer.apply_gradients().
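
  A minimal sketch of the two-step compute/apply route on a toy loss:

import tensorflow as tf

W = tf.Variable(2.0)
loss = tf.square(W - 5.0)

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
# minimize() is equivalent to compute_gradients() followed by apply_gradients()
grads_and_vars = optimizer.compute_gradients(loss, var_list=[W])
# the (gradient, variable) pairs could be clipped or rescaled here
train_op = optimizer.apply_gradients(grads_and_vars)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(grads_and_vars[0][0]))  # dloss/dW = 2*(W-5) = -6.0
    sess.run(train_op)
    print(sess.run(W))                     # 2.0 - 0.1*(-6.0) = 2.6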

8. tf.global_variables_initializer() initializes the state of all Tensor variables:

  init = tf.global_variables_initializer()

  with tf.Session() as sess:

    sess.run(init)

 

Here is a supplementary version of the code, adding visualization plus saving and loading of checkpoints:

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

plotdata = {"batchsize": [], "loss": []}

def moving_average(a, w=10):
    # smooth a sequence with a trailing window of width w
    if len(a) < w:
        return a[:]
    return [val if idx < w else sum(a[(idx - w):idx]) / w for idx, val in enumerate(a)]

train_X = np.linspace(-1, 1, 100)
train_Y = 2 * train_X + np.random.randn(*train_X.shape) * 0.2

plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.legend()
plt.show()

tf.reset_default_graph()

# build model

X = tf.placeholder("float32")
Y = tf.placeholder("float32")

W = tf.Variable(tf.random_normal([1]), name="weight")
b = tf.Variable(tf.zeros([1]), name="bias")

z = tf.multiply(X, W) + b

# backward optimization

cost = tf.reduce_mean(tf.square(Y - z))
learning_rate = 0.01

# optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# initialize all variables

init = tf.global_variables_initializer()

# training parameters

training_epochs = 20
display_step = 2
saver = tf.train.Saver(max_to_keep = 1)
savedir = "/Users/tiemuer/Desktop/python_workfile/savermodel_2/"

# session

with tf.Session() as sess:
    sess.run(init)

    # feed the training data

    for epoch in range(training_epochs):
        for (x, y) in zip(train_X, train_Y):
            sess.run(optimizer, feed_dict={X: x, Y: y})

        if epoch % display_step == 0:
            loss = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
            print("Epoch:", epoch + 1, "cost =", loss, "W =", sess.run(W), "b =", sess.run(b))
            if not np.isnan(loss):  # only log finite losses
                plotdata["batchsize"].append(epoch)
                plotdata["loss"].append(loss)
        # checkpoint after every epoch; max_to_keep=1 retains only the newest file
        saver.save(sess, savedir + "linermodel.cpkt", global_step=epoch)

    print("finished")
    print("cost =", sess.run(cost, feed_dict={X: train_X, Y: train_Y}), "W =", sess.run(W), "b =", sess.run(b))


    # show the model

    plt.plot(train_X, train_Y, 'ro', label='Original data')
    plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
    plt.legend()
    plt.show()

    plotdata["avgloss"] = moving_average(plotdata["loss"])
    plt.figure(1)
    plt.subplot(211)
    plt.plot(plotdata["batchsize"], plotdata["avgloss"], 'b--')
    plt.xlabel('Minibatch number')
    plt.ylabel('Loss')
    plt.title('Minibatch run vs. Training loss')

    plt.show()


# reload the last checkpoint in a fresh session
load_epoch = 19
with tf.Session() as sess2:
    sess2.run(tf.global_variables_initializer())
    saver.restore(sess2, savedir + "linermodel.cpkt-" + str(load_epoch))
    print("x = 0.2 , z = ", sess2.run(z, feed_dict={X: 0.2}))
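
A side note: instead of hard-coding load_epoch, tf.train.latest_checkpoint can locate the newest checkpoint file in the directory. A minimal sketch reusing savedir and saver from above:

ckpt = tf.train.latest_checkpoint(savedir)  # path of the most recent checkpoint, or None
if ckpt:
    with tf.Session() as sess3:
        saver.restore(sess3, ckpt)  # restore assigns all saved variables; no init needed
        print("x = 0.2 , z = ", sess3.run(z, feed_dict={X: 0.2}))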

 

  

 

    Original author: tensorflow
    Original URL: https://www.cnblogs.com/tiemuer/p/8901872.html
    This article is reposted from the web purely to share knowledge; if there is any infringement, please contact the blogger to have it removed.