Ran into an error, can anyone help?
Has anyone else run into this error?
import os
import model
import tensorflow as tf
import input_data

data = input_data.read_data_sets('MNIST_data', one_hot=True)

# model
with tf.variable_scope("convolutional"):
    x = tf.placeholder(tf.float32, [None, 784], name='x')
    keep_prob = tf.placeholder(tf.float32)
    y, variables = model.convolutional(x, keep_prob)

# train
y_ = tf.placeholder(tf.float32, [None, 10], name='y')
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

saver = tf.train.Saver(variables)
with tf.Session() as sess:
    merged_summary_op = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter('/tmp/mnist_log/1', sess.graph)
    summary_writer.add_graph(sess.graph)
    sess.run(tf.global_variables_initializer())
    for i in range(20000):
        batch = data.train.next_batch(50)
        if i % 100 == 0:
            # Found it: the feed_dict here was missing an argument at the end;
            # adding ", keep_prob: 1.0" fixes it.
            train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
            print("Step %d, training accuracy %g" % (i, train_accuracy))
        sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
    print(sess.run(accuracy, feed_dict={x: data.test.images, y_: data.test.labels, keep_prob: 1.0}))
    path = saver.save(
        sess, os.path.join(os.path.dirname(__file__), 'data_convolutional', 'convolutional.ckpt'),
        write_meta_graph=False, write_state=False)
    print("Saved: ", path)
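For reference, the root cause behind that fix: accuracy depends on keep_prob (it is passed into model.convolutional, presumably for dropout), so every eval/run that fetches accuracy has to feed it, or TensorFlow 1.x raises an InvalidArgumentError about an unfed placeholder. Below is a minimal sketch with a toy graph of my own (the tf.nn.dropout node just stands in for the poster's model) showing the same failure mode:

# Minimal sketch (toy graph, not the poster's model.convolutional):
# any tf.placeholder that a fetched tensor depends on must appear in feed_dict.
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 1], name='x')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
y = tf.nn.dropout(x, keep_prob)  # y depends on both placeholders

with tf.Session() as sess:
    # This line would fail with an InvalidArgumentError along the lines of
    # "You must feed a value for placeholder tensor 'keep_prob'":
    # sess.run(y, feed_dict={x: [[1.0]]})

    # This works: every placeholder on the path to y is fed.
    print(sess.run(y, feed_dict={x: [[1.0]], keep_prob: 1.0}))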
show me the code