# 002-lab. Linear regression with TensorFlow
import tensorflow as tf
# for reproducibility
tf.set_random_seed(777)
# You create the train dataset
# x_of_train is the feature
# y_of_train is the label
x_of_train=[1,2,3]
y_of_train=[1,2,3]
# You create the weight node and bias node as Variables
# A Variable is a value that TensorFlow can modify during training
# You create the weight and bias as 1-dimensional arrays
W_variable_node=tf.Variable(tf.random_normal([1]),name='weight')
b_variable_node=tf.Variable(tf.random_normal([1]),name='bias')
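# tf.random_normal([1]) draws one sample from a normal distribution (mean 0, stddev 1 by default),
# so W_variable_node and b_variable_node each start as a 1-element array, e.g. something like [0.3];
# the exact starting values are fixed by the seed above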
# You create the hypothesis function node for the linear regression model
# H(x)=W_variable_node*x+b_variable_node
hypothesis_function_node=x_of_train*W_variable_node+b_variable_node
# You create the loss (cost) function node
cost_function_node=tf.reduce_mean(tf.square(hypothesis_function_node-y_of_train))
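# tf.reduce_mean(tf.square(...)) is the mean squared error:
# cost(W,b)=(1/m)*sum_{i=1..m}(W*x_i+b-y_i)^2
# For example, with W=0 and b=0 on this dataset the cost would be (1+4+9)/3=14/3 (about 4.67)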
# You create the gradient descent optimizer node
gradient_descent_optimizer_node=tf.train.GradientDescentOptimizer(learning_rate=0.01)
to_be_trained_node=gradient_descent_optimizer_node.minimize(cost_function_node)
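# For intuition: minimize() computes d(cost)/dW and d(cost)/db and applies the update
# W := W - learning_rate*d(cost)/dW (and likewise for b). Below is a minimal, roughly
# equivalent sketch using tf.gradients; the node names are illustrative only and these
# nodes are not used in the training loop that follows
gradient_W_node,gradient_b_node=tf.gradients(cost_function_node,[W_variable_node,b_variable_node])
manual_W_update_node=W_variable_node.assign(W_variable_node-0.01*gradient_W_node)
manual_b_update_node=b_variable_node.assign(b_variable_node-0.01*gradient_b_node)
# Running sess.run([manual_W_update_node,manual_b_update_node]) once would perform one
# gradient descent step, which is what running to_be_trained_node does below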
# You create a Session instance
sess_object=tf.Session()
# When you use Variables (W_variable_node and b_variable_node in this example),
# you should run tf.global_variables_initializer() to initialize them
sess_object.run(tf.global_variables_initializer())
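# If you run a node that reads W_variable_node or b_variable_node before running the initializer,
# TensorFlow typically raises a FailedPreconditionError ("Attempting to use uninitialized value ...")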
# You actually train the model
for step in range(2001):
    sess_object.run(to_be_trained_node)
    if step%200==0:
        print("step: ",step,"loss function: ",sess_object.run(cost_function_node),
              "weight: ",sess_object.run(W_variable_node),"bias: ",sess_object.run(b_variable_node))
# step: 0 loss function: 8.081429e-10 weight: [1.0000329] bias: [-7.511567e-05]
# step: 200 loss function: 3.0881253e-10 weight: [1.0000205] bias: [-4.628927e-05]
# step: 400 loss function: 1.2910206e-10 weight: [1.0000134] bias: [-2.984078e-05]
# step: 600 loss function: 6.086154e-11 weight: [1.0000094] bias: [-2.0469337e-05]
# step: 800 loss function: 3.4305003e-11 weight: [1.0000072] bias: [-1.5142653e-05]
# step: 1000 loss function: 2.2084853e-11 weight: [1.0000057] bias: [-1.212069e-05]
# step: 1200 loss function: 1.6754598e-11 weight: [1.0000051] bias: [-1.0397309e-05]
# step: 1400 loss function: 1.3722949e-11 weight: [1.0000046] bias: [-9.444023e-06]
# step: 1600 loss function: 1.38662415e-11 weight: [1.0000045] bias: [-9.122129e-06]
# step: 1800 loss function: 1.2141991e-11 weight: [1.0000043] bias: [-8.7434355e-06]
# step: 2000 loss function: 1.22781785e-11 weight: [1.0000043] bias: [-8.6126865e-06]
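# Because hypothesis_function_node was built directly on the constant list x_of_train,
# you cannot feed it a new input. A minimal sketch of predicting for x=5 is to evaluate
# W*x+b yourself; since W converges to about 1.0 and b to about 0.0, the result is close to 5.0
print("prediction for x=5:",sess_object.run(W_variable_node*5.0+b_variable_node))
# prediction for x=5: approximately [5.0]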
# @
# usingPlaceHolderInLinearRegression.py
import tensorflow as tf
tf.set_random_seed(777)
W_variable_node=tf.Variable(tf.random_normal([1]),name='weight')
b_variable_node=tf.Variable(tf.random_normal([1]),name='bias')
# You create placeholder nodes
X_placeholder=tf.placeholder(tf.float32)
Y_placeholder=tf.placeholder(tf.float32)
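# A placeholder holds no data by itself; you supply its value through feed_dict each time you
# call sess_object.run(). For example (illustrative only, not part of the training flow below):
# sess_object.run(X_placeholder*2.0,feed_dict={X_placeholder:[1.0,2.0]}) would return [2. 4.]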
hypothesis_function_node=X_placeholder*W_variable_node+b_variable_node
cost_function_node=tf.reduce_mean(tf.square(hypothesis_function_node-Y_placeholder))
gradient_descent_optimizer_node=tf.train.GradientDescentOptimizer(learning_rate=0.01)
to_be_trained=gradient_descent_optimizer_node.minimize(cost_function_node)
sess_object=tf.Session()
sess_object.run(tf.global_variables_initializer())
for step in range(2001):
    cost_value,W_value,b_value,_=\
        sess_object.run([cost_function_node,
                         W_variable_node,
                         b_variable_node,
                         to_be_trained],
                        feed_dict={X_placeholder:[1,2,3],Y_placeholder:[1,2,3]})
    if step%200==0:
        print("step:",step,"cost_value:",cost_value,"W_value:",W_value,"b_value:",b_value)
# step: 0 cost_value: 1.2248668e-09 W_value: [0.9999594] b_value: [9.241529e-05]
# step: 200 cost_value: 4.8144483e-10 W_value: [0.9999744] b_value: [5.7735706e-05]
# step: 400 cost_value: 1.7676409e-10 W_value: [0.99998456] b_value: [3.5225814e-05]
# step: 600 cost_value: 6.862896e-11 W_value: [0.99999034] b_value: [2.1817144e-05]
# step: 800 cost_value: 2.9051725e-11 W_value: [0.9999936] b_value: [1.4159941e-05]
# step: 1000 cost_value: 1.3604525e-11 W_value: [0.9999955] b_value: [9.759523e-06]
# step: 1200 cost_value: 8.128609e-12 W_value: [0.99999654] b_value: [7.3260508e-06]
# step: 1400 cost_value: 5.272227e-12 W_value: [0.99999714] b_value: [5.8677247e-06]
# step: 1600 cost_value: 4.206413e-12 W_value: [0.99999756] b_value: [5.059482e-06]
# step: 1800 cost_value: 3.2448118e-12 W_value: [0.99999774] b_value: [4.5802694e-06]
# step: 2000 cost_value: 3.2448118e-12 W_value: [0.9999978] b_value: [4.4228796e-06]
# You test the model on new inputs
print(sess_object.run(hypothesis_function_node,feed_dict={X_placeholder:[5]}))
# [ 5.0110054]
print(sess_object.run(hypothesis_function_node,feed_dict={X_placeholder:[2.5]}))
# [ 2.50091505]
print(sess_object.run(hypothesis_function_node,feed_dict={X_placeholder:[1.5,3.5]}))
# [ 1.49687922 3.50495124]
# You fit the line with a new train dataset
for step in range(2001):
    cost_value,W_value,b_value,_=\
        sess_object.run([cost_function_node,
                         W_variable_node,
                         b_variable_node,
                         to_be_trained],
                        feed_dict={X_placeholder:[1,2,3,4,5],
                                   Y_placeholder:[2.1,3.1,4.1,5.1,6.1]})
    if step%200==0:
        print("step:",step,"cost_value:",cost_value,"W_value:",W_value,"b_value:",b_value)
# step: 0 cost_value: 2.472068e-07 W_value: [1.0003228] b_value: [1.0988345]
# step: 200 cost_value: 6.397231e-08 W_value: [1.0001642] b_value: [1.0994071]
# step: 400 cost_value: 1.6570766e-08 W_value: [1.0000837] b_value: [1.0996982]
# step: 600 cost_value: 4.312187e-09 W_value: [1.0000428] b_value: [1.099846]
# step: 800 cost_value: 1.1405064e-09 W_value: [1.0000219] b_value: [1.0999206]
# step: 1000 cost_value: 3.2591743e-10 W_value: [1.0000118] b_value: [1.0999576]
# step: 1200 cost_value: 7.504468e-11 W_value: [1.0000058] b_value: [1.0999795]
# step: 1400 cost_value: 7.504468e-11 W_value: [1.0000058] b_value: [1.0999795]
# step: 1600 cost_value: 7.504468e-11 W_value: [1.0000058] b_value: [1.0999795]
# step: 1800 cost_value: 7.504468e-11 W_value: [1.0000058] b_value: [1.0999795]
# step: 2000 cost_value: 7.504468e-11 W_value: [1.0000058] b_value: [1.0999795]
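# With the new dataset the model has learned approximately W=1.0 and b=1.1, i.e. H(x) ~= x+1.1,
# so the predictions below should be close to 6.1, 3.6, and [2.6, 4.6] respectively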
print(sess_object.run(hypothesis_function_node,feed_dict={X_placeholder: [5]}))
# approximately [6.1]
print(sess_object.run(hypothesis_function_node,feed_dict={X_placeholder: [2.5]}))
# approximately [3.6]
print(sess_object.run(hypothesis_function_node,feed_dict={X_placeholder: [1.5,3.5]}))
# approximately [2.6 4.6]