# -*- coding:utf-8 -*-

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import sys
import tempfile
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import lesson3.mnist_model as mnist_model

FLAGS = None

def main(_):
    """Train a CNN on MNIST and save the trained model to disk.

    Reads the dataset from FLAGS.data_dir, builds the network defined in
    mnist_model.deepnn, trains for 5000 mini-batches of 50 with Adam,
    prints training accuracy every 100 steps, evaluates on the test set,
    and writes both a TensorBoard graph summary and a model checkpoint.

    Args:
        _: unused positional argument supplied by tf.app.run.

    Returns:
        0 on completion.
    """
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

    # Input placeholder: flattened 28*28 grayscale MNIST images.
    X = tf.placeholder(tf.float32, [None, 784])
    # Label placeholder: one-hot vectors over the 10 digit classes.
    Y_ = tf.placeholder(tf.float32, [None, 10])

    # Build the network: input -> conv1 -> pool1 -> conv2 -> pool2 -> fc1 -> fc2.
    # deepnn returns the final-layer logits and the dropout keep-probability
    # placeholder.
    h_fc2, keep_prob = mnist_model.deepnn(X)

    # softmax_cross_entropy_with_logits applies softmax to the logits and
    # computes the cross entropy against the labels; it returns one loss
    # value per example in the batch.
    with tf.name_scope("loss"):
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=Y_, logits=h_fc2)
    # Average the per-example losses into a single scalar loss.
    cross_entropy_mean = tf.reduce_mean(cross_entropy)

    # Adam: adaptive-moment gradient descent with learning rate 1e-4.
    with tf.name_scope("adam_optimizer"):
        train_step = tf.train.AdamOptimizer(0.0001).minimize(cross_entropy_mean)

    # Accuracy: fraction of predictions whose argmax matches the label's.
    with tf.name_scope("accuracy"):
        correct_prediction = tf.equal(tf.argmax(h_fc2, 1), tf.argmax(Y_, 1))
        correct_prediction = tf.cast(correct_prediction, tf.float32)
    accuracy = tf.reduce_mean(correct_prediction)

    # Save the graph definition so the network structure can be inspected
    # in TensorBoard.
    graph_location = tempfile.mkdtemp(dir=r"E:\testDir\ml\model")
    print('Saving graph to: %s' % graph_location)
    train_writer = tf.summary.FileWriter(graph_location)
    train_writer.add_graph(tf.get_default_graph())

    # Saver for persisting the trained weights.
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(5000):
            batch = mnist.train.next_batch(50)
            if i % 100 == 0:
                # BUG FIX: evaluate with dropout disabled (keep_prob=1.0).
                # The original fed 0.5 here, which leaves dropout active
                # during evaluation and makes the reported training
                # accuracy noisy and pessimistic.
                train_accuracy = sess.run(
                    accuracy,
                    feed_dict={X: batch[0], Y_: batch[1], keep_prob: 1.0})
                print('step %d, training accuracy %g' % (i, train_accuracy))
            # Dropout stays active (keep_prob=0.5) for the training step.
            train_step.run(feed_dict={X: batch[0], Y_: batch[1], keep_prob: 0.5})

        # Evaluate on the test set in 200 batches of 50 (10000 images) so the
        # whole set is never fed at once; accumulate the mean batch accuracy.
        test_accuracy = 0
        for _unused in range(200):
            batch = mnist.test.next_batch(50)
            test_accuracy += sess.run(
                accuracy,
                feed_dict={X: batch[0], Y_: batch[1], keep_prob: 1.0}) / 200
        print('test accuracy %g' % test_accuracy)
        saver.save(sess, r"E:\testDir\ml\model\mnist\mnist_cnn_model.ckpt")
    return 0

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir",type=str,default=r"E:\testDir\ml\trainData\mnist",help="Directory for string input data")
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]]+ unparsed)