'''
说明书：
1） 这是一个实现了多类目分类的DNN
2） 用到的激活函数是在分类中目前常用的sigmoid函数
3） 只需要把inputs和labels按照[[],[],[]]的形式输入，然后直接进行Train即可
4） 需要直接做预测的时候就可以把prediction设置为1，现在默认的是以整个数据集的第一个手写数字来做预测。
'''


import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import easygui as eg
import numpy as np


# Load the MNIST handwritten-digit dataset (labels one-hot encoded).
# Downloads to 'data/' on first run.
mnist = input_data.read_data_sets('data/', one_hot=True)

'''
print(mnist.train.num_examples)
# x = 55000,784, y = 55000,10
print(mnist.train.images.shape,'\n',mnist.train.labels.shape)
'''
## Solve the classification problem with a neural network.

# Build a 4-layer network: input -> 256 -> 128 -> 10 classes.

n_input = 784           # flattened 28x28 grayscale image
n_unit_hidden_1 = 256   # units in first hidden layer
n_unit_hidden_2 = 128   # units in second hidden layer
n_classes = 10          # digits 0-9

lr = 0.01               # SGD learning rate
batch_size = 100        # examples per mini-batch
display_step = 1        # print metrics every `display_step` epochs


saveOrNot = 1           # 1 = save the model after training
loadOrNot = 1           # 1 = restore a previously saved checkpoint before training
prediction = 1          # 1 = run a demo prediction on the first training image
# NOTE(review): Windows-specific absolute path — adjust per machine.
saver_file = r"F:/Python/DNN/data/model/dfy_model"



### 训练过程

def train():
    """Build, train, and evaluate a 4-layer MLP classifier on MNIST.

    Uses the module-level hyperparameters (n_input, n_unit_hidden_1/2,
    n_classes, lr, batch_size, display_step), the module-level `mnist`
    dataset, and the saveOrNot/loadOrNot/prediction flags. Trains until
    both train and test accuracy exceed 0.9, checkpointing to `saver_file`
    and writing the graph to ./data/graph for TensorBoard.

    Returns:
        None. Side effects: prints metrics, writes checkpoint and graph files.
    """
    # Placeholders so we can feed mini-batches instead of the full dataset.
    x = tf.placeholder(tf.float32, shape=[None, n_input], name='x')
    y = tf.placeholder(tf.float32, shape=[None, n_classes], name='y')

    # Small random initial weights (stddev 0.1) for two hidden layers + output.
    weights = {
        'w1': tf.Variable(tf.random_normal(shape=[n_input, n_unit_hidden_1], mean=0.0, stddev=0.1)),
        'w2': tf.Variable(tf.random_normal(shape=[n_unit_hidden_1, n_unit_hidden_2], mean=0.0, stddev=0.1)),
        'out': tf.Variable(tf.random_normal(shape=[n_unit_hidden_2, n_classes], mean=0.0, stddev=0.1)),
        }

    biases = {
        'b1': tf.Variable(tf.random_normal(shape=[n_unit_hidden_1], mean=0.0, stddev=0.1)),
        'b2': tf.Variable(tf.random_normal(shape=[n_unit_hidden_2], mean=0.0, stddev=0.1)),
        'out': tf.Variable(tf.random_normal(shape=[n_classes], mean=0.0, stddev=0.1)),
        }

    def _multilayer_perceptron(_x, _weights, _biases):
        """Two sigmoid hidden layers followed by a linear output layer (logits)."""
        # (fixed typo: original was 'multilayer_perceotron')
        layer1 = tf.nn.sigmoid(tf.add(tf.matmul(_x, _weights['w1']), _biases['b1']))
        layer2 = tf.nn.sigmoid(tf.add(tf.matmul(layer1, _weights['w2']), _biases['b2']))
        # No activation on the output: softmax is applied inside the loss op.
        return tf.add(tf.matmul(layer2, _weights['out']), _biases['out'])

    # Logits (un-normalized class scores).
    act = _multilayer_perceptron(x, weights, biases)

    # Mean softmax cross-entropy loss over the batch.
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=act, labels=y))

    # Renamed from `train` — the original shadowed this function's own name.
    train_op = tf.train.GradientDescentOptimizer(learning_rate=lr).minimize(cost)

    # axis=1: argmax per row, i.e. the predicted class of each example.
    correct_pred = tf.equal(tf.argmax(act, axis=1), tf.argmax(y, axis=1))
    acc = tf.reduce_mean(tf.cast(correct_pred, dtype=tf.float32))

    init_op = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init_op)

        saver = tf.train.Saver()

        # BUG FIX: the original `loadOrNot==1 and saver.restore(...) or 0`
        # crashed on the first run when no checkpoint existed yet.
        if loadOrNot == 1 and tf.train.checkpoint_exists(saver_file):
            saver.restore(sess, saver_file)

        epoch = 0
        while True:
            avg_cost = 0.0
            total_batch = int(mnist.train.num_examples / batch_size)

            for _ in range(total_batch):
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                feeds = {x: batch_xs, y: batch_ys}
                sess.run(train_op, feed_dict=feeds)
                avg_cost += sess.run(cost, feed_dict=feeds)

            avg_cost = avg_cost / total_batch

            if (epoch + 1) % display_step == 0:
                print('批次:%03d 损失函数值：%.9f' % (epoch, avg_cost))
                train_acc = sess.run(acc, feed_dict={x: mnist.train.images, y: mnist.train.labels})
                print('训练集的准确率:%0.3f' % train_acc)
                test_acc = sess.run(acc, feed_dict={x: mnist.test.images, y: mnist.test.labels})
                print('测试集的准确率:%0.3f' % test_acc)

                # Stop once both accuracies clear 0.9.
                if train_acc > 0.9 and test_acc > 0.9:
                    # BUG FIX: originally saved to the hard-coded relative path
                    # './data/model/dfy_model', inconsistent with `saver_file`
                    # used by restore and the final save below.
                    saver.save(sess, saver_file)
                    break
            epoch += 1

        if saveOrNot == 1:
            print("Save to path:", saver.save(sess, saver_file))

        def _show_prediction():
            """Predict the first training image; print raw scores, one-hot, truth."""
            # BUG FIX: this was originally named `prediction`, shadowing the
            # module-level `prediction` flag, so `prediction == 1` compared a
            # function object to 1 and the demo never ran.
            # The model expects a batch, hence wrapping the single image in a list.
            prediction_value = sess.run(act, feed_dict={x: [mnist.train.images[0]]})
            scores = prediction_value[0]
            best = max(scores)
            # One-hot encode the winning class for readability.
            newlist = [1 if s == best else 0 for s in scores]
            print('预测值原值：', scores, '\n预测值可视版:', newlist, '\n正确值：', mnist.train.labels[0])

        if prediction == 1:
            _show_prediction()

        # Dump the graph for TensorBoard visualization.
        writer = tf.summary.FileWriter('./data/graph', tf.get_default_graph())
        writer.close()

# Run the full training procedure (trains, checkpoints, demo-predicts).
train()

# Placeholders for loading an already-trained network — only referenced by the
# commented-out loader code at the bottom of this file.
x = None
y = None
# NOTE(review): this Session is never closed and is only used by the
# commented-out loader below — confirm whether it is still needed.
sess = tf.Session()


# Pop-up dialog signaling that training has finished.
eg.msgbox("Hey, Susu has the solution now!")



'''
#如何调用已经训练好的模型

def load_model():
    global x,y
    modelpath = r'F:/Python/DNN/data/model/'  #你需要调用的model地址
    saver = tf.train.import_meta_graph(modelpath + 'dfy_model.meta')  #你需要调用的model的meta文件
    saver.restore(sess, tf.train.latest_checkpoint(modelpath))
    graph = tf.get_default_graph()
    print(graph)
    x = graph.get_tensor_by_name('x:0')
    y = graph.get_tensor_by_name('tanh:0')
    print('Successfully loaded the pre-trained model!')


def inputs(inputs):
    data = mnist.train.images[inputs]
    correct = mnist.train.labels[inputs]
    return data

def do_prediction(inputs):
    load_model()
    testdata = inputs(inputs)
    print(testdata)

#do_prediction(inputs(0))
'''

