本文共 2149 字,大约阅读时间需要 7 分钟。
"""TensorFlow MNIST handwritten-digit recognition (softmax regression).

Trains a single-layer softmax classifier on MNIST with stochastic
gradient descent (100 steps, batch size 100) and prints the test-set
accuracy. Observed result in the original run: 0.8868.

NOTE(review): uses the TensorFlow 1.x API (tf.placeholder,
tf.InteractiveSession, tensorflow.examples.tutorials.mnist) — requires
TF 1.x; these symbols were removed in TF 2.x.
"""
import tensorflow.examples.tutorials.mnist as inputData
import tensorflow as tf
# Command-line parsing modules (present in the original source; unused below).
import argparse
import sys

# Import data (downloaded/cached under data_dir).
data_dir = '/tmp/tensorflow/mnist/input_data'
mnist = inputData.input_data.read_data_sets(data_dir, one_hot=True)

# Create model (x, W, b, loss).
# Each image is 28*28 = 784 pixels; None means any batch length.
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
# Prediction: raw (unnormalized) logits.
y = tf.matmul(x, W) + b

# Define loss and optimizer.
y_ = tf.placeholder(tf.float32, [None, 10])
# The raw formulation of cross-entropy,
#
#   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
#                                 reduction_indices=[1]))
#
# (tf.reduce_sum adds the elements in the second dimension of y due to
# reduction_indices=[1]; tf.reduce_mean averages over the batch) can be
# numerically unstable. So here we use
# tf.nn.softmax_cross_entropy_with_logits on the raw outputs of 'y',
# and then average across the batch.
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

# Start session and initialize variables.
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()

# Train -- stochastic training.
for _ in range(100):
    # 100 data points are randomly selected from the training set.
    batch_x, batch_y = mnist.train.next_batch(100)
    sess.run(optimizer, feed_dict={x: batch_x, y_: batch_y})

# Test.
# tf.argmax(_, 1): position of the max value along each row;
# tf.equal: True where the prediction matches the label, else False.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
# tf.cast: convert the boolean vector to tf.float32 so it can be averaged.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy,
               feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
转载地址:http://qmjti.baihongyu.com/