005. Using TensorBoard
import tensorflow as tf
# @
# In TensorFlow, a Session is like an open file stream:
# if we open it with a 'with' statement, it is closed automatically,
# so we don't have to call sess.close() ourselves.
# with tf.Session() as sess:
#     saver.restore(sess, './tflectcheckpoint')
#     for i in range(1000):
#         _, loss, acc = sess.run([train, cost, accuracy], tensor_map)
#         pred = sess.run(tf.argmax(y, 1), tensor_map)
#         if i % 100 == 0:
#             saver.save(sess, './tflectcheckpoint')
#             print('----------')
#             print('step: ', i)
#             print('loss: ', loss)
#             print('accuracy: ', acc)
# @
# We use 'with' on tf.name_scope():
#     with tf.name_scope('hidden') as scope:
# Every op created inside the block is recorded under that scope's name
# (here 'hidden'). TensorFlow groups the ops of each scope together,
# and TensorBoard shows each group as a single collapsible node in the graph.
# @
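# A minimal sketch of how name_scope prefixes op names (illustrative only;
# the scope 'hidden' and the tensors W, b, h below are assumptions, not part
# of this lecture's model):
# with tf.name_scope('hidden'):
#     h = tf.sigmoid(tf.matmul(x, W) + b, name='h')
# print(h.op.name)  # -> 'hidden/h'; this prefix is what groups ops on TensorBoard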
input_data = [[1, 5, 3, 7, 8, 10, 12],
              [5, 8, 10, 3, 9, 7, 1]]
label_data = [[0, 0, 0, 1, 0],
              [1, 0, 0, 0, 0]]
INPUT_SIZE = 7
HIDDEN1_SIZE = 10
HIDDEN2_SIZE = 8
CLASSES = 5
LEARNING_RATE = 0.05
x = tf.placeholder(tf.float32, shape=[None, INPUT_SIZE], name='x')
y_ = tf.placeholder(tf.float32, shape=[None, CLASSES], name='y_')
tensor_map = {x: input_data, y_: label_data}
W_h1 = tf.Variable(tf.truncated_normal(shape=[INPUT_SIZE, HIDDEN1_SIZE]), dtype=tf.float32, name='W_h1')
b_h1 = tf.Variable(tf.zeros(shape=[HIDDEN1_SIZE]), dtype=tf.float32, name='b_h1')
W_h2 = tf.Variable(tf.truncated_normal(shape=[HIDDEN1_SIZE, HIDDEN2_SIZE]), dtype=tf.float32, name='W_h2')
b_h2 = tf.Variable(tf.zeros(shape=[HIDDEN2_SIZE]), dtype=tf.float32, name='b_h2')
W_o = tf.Variable(tf.truncated_normal(shape=[HIDDEN2_SIZE, CLASSES]), dtype=tf.float32, name='W_o')
b_o = tf.Variable(tf.zeros(shape=[CLASSES]), dtype=tf.float32, name='b_o')
param_list = [W_h1, b_h1, W_h2, b_h2, W_o, b_o]
saver = tf.train.Saver()
# Build each layer (e.g. hidden1) inside its own name scope (e.g. 'hidden_layer_1'),
# so TensorBoard groups it into one node; 'as h1scope' just gives us a handle to that scope.
with tf.name_scope('hidden_layer_1') as h1scope:
    hidden1 = tf.sigmoid(tf.matmul(x, W_h1) + b_h1, name='hidden1')
with tf.name_scope('hidden_layer_2') as h2scope:
    hidden2 = tf.sigmoid(tf.matmul(hidden1, W_h2) + b_h2, name='hidden2')
with tf.name_scope('output_layer') as oscope:
    y = tf.sigmoid(tf.matmul(hidden2, W_o) + b_o, name='y')
with tf.name_scope('calculate_costs'):
    cost = tf.reduce_sum(-y_ * tf.log(y) - (1 - y_) * tf.log(1 - y), reduction_indices=1)
    cost = tf.reduce_mean(cost)
    # Record the scalar 'cost' in the summary so it is logged at every step
    tf.scalar_summary('cost', cost)
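# (Optional sketch, assuming the same old summary API as above: parameters can
# be logged as histograms too, and merge_all_summaries() will pick them up.)
# tf.histogram_summary('W_h1', W_h1)
# tf.histogram_summary('b_h1', b_h1)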
with tf.name_scope('training'):
    train = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(cost)
with tf.name_scope('evaluation'):
    comp_pred = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(comp_pred, tf.float32))
sess = tf.Session()
# saver.restore(sess, './tflectcheckpoint')
sess.run(tf.initialize_all_variables())
# Merge all the summary ops defined above into a single op
merge = tf.merge_all_summaries()
# Create a writer that saves the merged summaries into the ./summaries/ folder;
# passing sess.graph also writes the graph so it shows up on the Graphs tab
train_writer = tf.train.SummaryWriter('./summaries/', sess.graph)
for i in range(1000):
    summary, _, loss, acc = sess.run([merge, train, cost, accuracy], tensor_map)
    pred = sess.run(tf.argmax(y, 1), tensor_map)
    # Write the merged summary for this step so TensorBoard can plot it
    train_writer.add_summary(summary, i)
    if i % 100 == 0:
        saver.save(sess, './tflectcheckpoint')
        print('----------')
        print('step: ', i)
        print('loss: ', loss)
        print('accuracy: ', acc)
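# Flush and close the writer so every pending summary event reaches disk
train_writer.close()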
sess.close()
# @
# The summary files have been written; now load them in TensorBoard:
#   cd ./summaries/
#   tensorboard --logdir=./
# Check the port number it prints (e.g. 6006),
# open localhost:6006 in a browser, and go to the Graphs tab.
# @
# If you want TensorBoard to show an updated run, delete the existing summary files
# and run the code again.