Someone posted a rock-paper-scissors example in TensorFlow, and while reading that code I had the idea of converting the numeric values into bits, so I tried it. This is a model that learns data distributed along y = 2x by gradient descent, as introduced in the tutorial. Thought of as a bit operation, doubling is just a one-position shift.
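As a quick illustration (using the same 32-bit input / 64-bit target formatting as the code below), a number and its double have the same bit pattern, shifted one place to the left:

n = 13
print(format(n, '032b'))      # 00000000000000000000000000001101
print(format(n * 2, '064b'))  # the same pattern shifted left by one, padded to 64 bits
print((n << 1) == n * 2)      # True: doubling is a one-bit left shift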
The accuracy could be seen converging, presumably because the model overfits the training data. In this graph the optimizer is tf.train.GradientDescentOptimizer(0.001).minimize(_loss). The converged value is 0.434, which is close to log₁₀ e.
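For reference, the constant the converged accuracy appears to be compared against is log₁₀ e, which can be checked directly:

import math
print(math.log10(math.e))  # 0.43429448190325176, close to the observed 0.434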
■ Learning results
■ Finally converged
# -*- coding: utf-8 -*-
import tensorflow as tf
_input = []
_result = []
_t_input = []
_t_result = []
for p in range(1, 100):
    point = p
    # Represent y = 2x as bit strings: 32-bit input, 64-bit target
    _input.append([int(x) for x in format(point, '032b')])
    _result.append([int(x) for x in format(point * 2, '064b')])
# Test data (here the same range as the training data)
for p in range(1, 100):
    _t_input.append([int(x) for x in format(p, '032b')])
    _t_result.append([int(x) for x in format(p * 2, '064b')])
# Weight and bias variables (initialized to zero)
W = tf.Variable(tf.zeros([32, 64]))
b = tf.Variable(tf.zeros([64]))
# Placeholder for the 32-bit feature vectors fed in during training
x = tf.placeholder("float", [None, 32])
# Softmax output layer
y = tf.nn.softmax(tf.matmul(x, W) + b)
# Placeholder for the 64-bit true labels fed in during training
supervisor_labels_placeholder = tf.placeholder("float", [None, 64])
# Loss: cross-entropy between the output and the target labels (the value to minimize)
def get_loss(_output, _supervisor_labels_placeholder):
    cross_entropy = -tf.reduce_sum(_supervisor_labels_placeholder * tf.log(_output))
    return cross_entropy

# Optimization algorithm
def get_training(_loss):
    train_step = tf.train.GradientDescentOptimizer(0.001).minimize(_loss)
    return train_step

# Accuracy: fraction of samples whose top-scoring bit position matches the label's
def get_accuracy(_y, _supervisor_labels_placeholder):
    correct_prediction = tf.equal(tf.argmax(_y, 1), tf.argmax(_supervisor_labels_placeholder, 1))
    return tf.reduce_mean(tf.cast(correct_prediction, "float"))
# Prepare a session
with tf.Session() as sess:
    summary_writer = tf.train.SummaryWriter('data', graph_def=sess.graph_def)
    # Initialize the variables
    init = tf.initialize_all_variables()
    sess.run(init)
    loss = get_loss(y, supervisor_labels_placeholder)
    training_op = get_training(loss)
    # Build the accuracy op once, rather than recreating it inside the training loop
    accuracy = get_accuracy(y, supervisor_labels_placeholder)
    # Teacher (training) data and test data
    feed_dict = {x: _input, supervisor_labels_placeholder: _result}
    t_feed_dict = {x: _t_input, supervisor_labels_placeholder: _t_result}
    # Training
    for i in range(1000000):
        sess.run(training_op, feed_dict=feed_dict)
        # if i % 100 == 0:
        #     # Output the current loss
        #     print(sess.run(loss, feed_dict=feed_dict))
        # if i % 1000 == 0:
        #     print(sess.run(accuracy, feed_dict=t_feed_dict))
        #     summary_writer = tf.train.SummaryWriter('data', graph_def=sess.graph_def)
        # Output the accuracy at every step
        print(sess.run(accuracy, feed_dict=t_feed_dict))
    # Final result
    print("~~~~~~~~~~result~~~~~~~~~~")
    print(sess.run(accuracy, feed_dict=t_feed_dict))
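As a small aside not in the original listing, here is a hypothetical helper that turns one of the 64-element bit vectors back into an integer, which is handy for eyeballing the training data (or a thresholded prediction):

# Hypothetical helper (not part of the original post): convert a list of bits back to an int
def bits_to_int(bits):
    return int(''.join(str(int(round(b))) for b in bits), 2)

print(bits_to_int(_result[0]))  # 2, i.e. the label for the first training point p = 1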
For how the code is divided up, I followed the TensorFlow tutorial "MNIST For ML Beginners".