Gradient Descent
import tensorflow as tf
import numpy as np
# Generate 100 random x points and their targets (true slope 0.1, true intercept 0.2)
x_data = np.random.rand(100)
y_data = x_data * 0.1 + 0.2
# Build a linear model y = k*x + b, with k and b initialized to 0
b = tf.Variable(0.)
k = tf.Variable(0.)
y = k * x_data + b
# Define the loss: mean squared error between the targets and the model output
loss = tf.reduce_mean(tf.square(y_data - y))
# Define the optimizer: plain gradient descent with learning rate 0.2
optimizer = tf.train.GradientDescentOptimizer(0.2)
# Training op: minimize the loss
train = optimizer.minimize(loss)
# Initialize all variables
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    # Run 201 gradient-descent steps, printing k and b every 20 steps
    for step in range(201):
        sess.run(train)
        if step % 20 == 0:
            print(sess.run([k, b]))
Sample output, printed every 20 steps (k and b converge toward the true values 0.1 and 0.2):
[0.055063572, 0.10062776]
[0.10450891, 0.19752166]
[0.1027341, 0.19849722]
[0.10165788, 0.19908877]
[0.10100529, 0.19944745]
[0.10060959, 0.19966495]
[0.10036964, 0.19979683]
[0.10022413, 0.1998768]
[0.1001359, 0.1999253]
[0.10008241, 0.1999547]
[0.10004997, 0.19997254]
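For intuition, the update that GradientDescentOptimizer applies here can also be written out by hand. The sketch below is a minimal NumPy-only version of the same loop (it is not part of the original script; names such as y_pred, grad_k and grad_b are just illustrative). For loss = mean((y_pred - y_data)^2), the analytic gradients are d(loss)/dk = mean(2*(y_pred - y_data)*x_data) and d(loss)/db = mean(2*(y_pred - y_data)), and each step subtracts the learning rate times the gradient.

import numpy as np

# Hand-rolled gradient descent for y = k*x + b (illustrative sketch)
x_data = np.random.rand(100)
y_data = x_data * 0.1 + 0.2

k, b = 0.0, 0.0   # same zero initialization as the tf.Variable version
lr = 0.2          # same learning rate as GradientDescentOptimizer(0.2)

for step in range(201):
    y_pred = k * x_data + b
    # Gradients of mean((y_pred - y_data)**2) with respect to k and b
    grad_k = np.mean(2 * (y_pred - y_data) * x_data)
    grad_b = np.mean(2 * (y_pred - y_data))
    k -= lr * grad_k
    b -= lr * grad_b
    if step % 20 == 0:
        print([k, b])

Since both loops perform the same update with the same learning rate, the printed k and b should drift toward 0.1 and 0.2 just as in the TensorFlow run above (the exact numbers differ because the random x points differ).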