Sample output from the final evaluation pass of the script below:

Training Data Eval:
  Num examples: 55000  Num correct: 52015  Precision @ 1: 0.9457
Validation Data Eval:
  Num examples: 5000  Num correct: 4740  Precision @ 1: 0.9480
Test Data Eval:
  Num examples: 10000  Num correct: 9456  Precision @ 1: 0.9456
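"Precision @ 1" here is just top-1 accuracy: the fraction of examples whose true label receives the highest logit (e.g. 9456 / 10000 = 0.9456 on the test split). A minimal NumPy sketch of that computation, with made-up logits and labels (this snippet is illustrative and not part of the script):

    import numpy as np

    # Hypothetical 3-class logits for 4 examples.
    logits = np.array([[2.0, 0.1, 0.3],
                       [0.2, 1.5, 0.9],
                       [0.4, 0.3, 2.2],
                       [1.1, 1.0, 0.2]])
    labels = np.array([0, 1, 2, 2])  # true classes

    top1 = logits.argmax(axis=1)      # predicted class per example
    correct = (top1 == labels).sum()  # 3 of 4 correct here
    print('Precision @ 1: %0.4f' % (correct / float(len(labels))))  # 0.7500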

import math

import tensorflow as tf

import input_data

NUM_CLASSES = 10
IMAGE_SIZE = 28
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE

flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
flags.DEFINE_integer('max_steps', 10000, 'Number of steps to run trainer.')
flags.DEFINE_integer('hidden1', 128, 'Number of units in hidden layer 1.')
flags.DEFINE_integer('hidden2', 32, 'Number of units in hidden layer 2.')
flags.DEFINE_integer('batch_size', 100, 'Batch size. '
                     'Must divide evenly into the dataset sizes.')
flags.DEFINE_string('train_dir', 'data', 'Directory to put the training data.')
flags.DEFINE_boolean('fake_data', False, 'If true, uses fake data '
                     'for unit testing.')


def inference(images, hidden1_units, hidden2_units):
  # Hidden layer 1: IMAGE_PIXELS -> hidden1_units, ReLU activation.
  with tf.name_scope('hidden1'):
    weights = tf.Variable(
        tf.truncated_normal([IMAGE_PIXELS, hidden1_units],
                            stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),
        name='weights')
    biases = tf.Variable(tf.zeros([hidden1_units]),
                         name='biases')
    hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
  # Hidden layer 2: hidden1_units -> hidden2_units, ReLU activation.
  with tf.name_scope('hidden2'):
    weights = tf.Variable(
        tf.truncated_normal([hidden1_units, hidden2_units],
                            stddev=1.0 / math.sqrt(float(hidden1_units))),
        name='weights')
    biases = tf.Variable(tf.zeros([hidden2_units]),
                         name='biases')
    hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)
  # Output layer: hidden2_units -> NUM_CLASSES, linear (softmax is applied in the loss).
  with tf.name_scope('softmax_linear'):
    weights = tf.Variable(
        tf.truncated_normal([hidden2_units, NUM_CLASSES],
                            stddev=1.0 / math.sqrt(float(hidden2_units))),
        name='weights')
    biases = tf.Variable(tf.zeros([NUM_CLASSES]),
                         name='biases')
    logits = tf.matmul(hidden2, weights) + biases
  return logits


def loss(logits, labels):
  # Average cross-entropy over the batch; the sparse version takes integer labels.
  labels = tf.to_int64(labels)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits, labels, name='xentropy')
  loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
  return loss


def training(loss, learning_rate):
  # Log the loss for TensorBoard and apply one SGD step per call.
  tf.scalar_summary(loss.op.name, loss)
  optimizer = tf.train.GradientDescentOptimizer(learning_rate)
  global_step = tf.Variable(0, name='global_step', trainable=False)
  train_op = optimizer.minimize(loss, global_step=global_step)
  return train_op


def evaluation(logits, labels):
  # Count examples where the true label is the top-1 prediction.
  correct = tf.nn.in_top_k(logits, labels, 1)
  return tf.reduce_sum(tf.cast(correct, tf.int32))


def placeholder_inputs(batch_size):
  images_placeholder = tf.placeholder(tf.float32, shape=(batch_size,
                                                         IMAGE_PIXELS))
  labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size))
  return images_placeholder, labels_placeholder


def fill_feed_dict(data_set, images_pl, labels_pl):
  # Pull the next batch and map it onto the two placeholders.
  images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size,
                                                 FLAGS.fake_data)
  feed_dict = {
      images_pl: images_feed,
      labels_pl: labels_feed,
  }
  return feed_dict


def do_eval(sess,
            eval_correct,
            images_placeholder,
            labels_placeholder,
            data_set):
  # Run one full epoch over data_set and report precision @ 1.
  true_count = 0
  steps_per_epoch = data_set.num_examples // FLAGS.batch_size
  num_examples = steps_per_epoch * FLAGS.batch_size
  for step in range(steps_per_epoch):
    feed_dict = fill_feed_dict(data_set,
                               images_placeholder,
                               labels_placeholder)
    true_count += sess.run(eval_correct, feed_dict=feed_dict)
  # float() keeps the division from truncating to 0 under Python 2.
  precision = float(true_count) / num_examples
  print('  Num examples: %d  Num correct: %d  Precision @ 1: %0.04f' %
        (num_examples, true_count, precision))


def run_training():
  data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)
  print(FLAGS.train_dir, FLAGS.fake_data)
  with tf.Graph().as_default():
    images_placeholder, labels_placeholder = placeholder_inputs(
        FLAGS.batch_size)
    # Build the graph: forward pass, loss, training op, and eval op.
    logits = inference(images_placeholder,
                       FLAGS.hidden1,
                       FLAGS.hidden2)
    loss_mnist = loss(logits, labels_placeholder)
    train_op = training(loss_mnist, FLAGS.learning_rate)
    eval_correct = evaluation(logits, labels_placeholder)
    summary = tf.merge_all_summaries()
    init = tf.initialize_all_variables()
    sess = tf.Session()
    summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)
    sess.run(init)
    for step in range(FLAGS.max_steps):
      feed_dict = fill_feed_dict(data_sets.train,
                                 images_placeholder,
                                 labels_placeholder)
      _, loss_value = sess.run([train_op, loss_mnist],
                               feed_dict=feed_dict)
      # Report the loss and write a summary every 100 steps.
      if step % 100 == 0:
        print('Step %d: loss = %.2f' % (step, loss_value))
        summary_str = sess.run(summary, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)
        summary_writer.flush()
      # Evaluate against all three splits every 1000 steps and at the end.
      if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        print('Training Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.train)
        print('Validation Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.validation)
        print('Test Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.test)


run_training()
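The listing targets the old TensorFlow 0.x graph API; tf.scalar_summary, tf.merge_all_summaries, tf.initialize_all_variables, and tf.train.SummaryWriter were all removed in later releases. For readers on TensorFlow 2.x, here is a rough tf.keras sketch of the same two-hidden-layer model (128 and 32 ReLU units, linear 10-way output, SGD at 0.01 on sparse softmax cross-entropy); this is an illustrative equivalent, not part of the original script:

    import tensorflow as tf

    model = tf.keras.Sequential([
        tf.keras.layers.Dense(128, activation='relu', input_shape=(784,)),
        tf.keras.layers.Dense(32, activation='relu'),
        tf.keras.layers.Dense(10),  # linear logits; softmax lives in the loss
    ])
    model.compile(
        optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])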
