import tensorflow as tf
import os
from cn.redguest.pbase.model.Convolution import Convolution2D as Convolution
from cn.redguest.pbase.model.Convolution import *
from cn.redguest.pbase.model.Dense import *
from cn.redguest.pbase.model.Processor import *

from tensorflow.examples.tutorials.mnist import input_data

# Download (if not already cached) and load the MNIST dataset into the local
# "MNIST_data/" directory. one_hot=True gives labels as 10-element one-hot
# vectors, matching the 10-way softmax output built in main().
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

'''
This example builds and trains a convolutional neural network.
'''


def main():
    """Build, train, and checkpoint a small CNN classifier on MNIST.

    Graph: conv(5x5,32) -> relu -> maxpool -> conv(5x5,64) -> relu -> maxpool
    -> dense(1024) + batchnorm + ELU + dropout -> dense(10) -> softmax.
    Loss is cross-entropy plus L2 regularization on both dense layers.
    Writes TensorBoard summaries to "logs/" and checkpoints to
    "model/cnn/MNIST_dataset/". Resumes from the latest checkpoint if present.
    Runs as an (effectively) endless training loop; no return value.
    """
    bat_size = 20
    train_num = 2000000
    learning_speed = 0.0003

    tf_number_type = tf.float32

    # Placeholders: input images, one-hot labels, dropout keep-probability.
    x = tf.placeholder(tf_number_type)
    y = tf.placeholder(tf_number_type)
    keep = tf.placeholder(tf_number_type)

    # Feature extractor: two conv/relu/maxpool stages, 28x28 -> 14x14 -> 7x7.
    cnn1 = Convolution(x, [5, 5, 1, 32], [1, 1, 1, 1], "SAME", tf=tf, d_type=tf_number_type)
    r1 = Relu(cnn1.y)
    max1 = MaxPool(r1.y, [1, 2, 2, 1], [1, 2, 2, 1], "SAME", tf=tf, d_type=tf_number_type)

    cnn2 = Convolution(max1.y, [5, 5, 32, 64], [1, 1, 1, 1], "SAME", tf=tf, d_type=tf_number_type)
    r2 = Relu(cnn2.y)
    # FIX: pass tf=tf here as well — the original omitted it on this one
    # MaxPool call, inconsistent with the identical call above.
    max2 = MaxPool(r2.y, [1, 2, 2, 1], [1, 2, 2, 1], "SAME", tf=tf, d_type=tf_number_type)

    # Classifier head: flatten 7*7*64 features -> 1024 -> 10 classes.
    dense1 = Dense(tf.reshape(max2.y, [-1, 7 * 7 * 64]), 7 * 7 * 64, 1024)
    bn1 = BatchNormalization(dense1.y, [1024])
    relu3 = ELU(bn1.y)
    drop1 = Dropout(relu3.y, keep)

    dense2 = Dense(drop1.y, 1024, 10)
    softmax1 = Softmax(dense2.y)

    # Cross-entropy loss. FIX: clip the softmax output away from 0 so
    # tf.log never produces -inf/NaN when a probability underflows to 0.
    loss = tf.reduce_mean(
        -tf.reduce_sum(y * tf.log(tf.clip_by_value(softmax1.y, 1e-10, 1.0)),
                       reduction_indices=[1]))
    l2 = L2Regularizer(dense1.Weights, 0.01, tf)
    l22 = L2Regularizer(dense2.Weights, 0.01, tf)
    loss = l2.y + loss + l22.y

    train = tf.train.AdamOptimizer(learning_speed).minimize(loss)

    # Accuracy: fraction of batch where predicted class matches the label.
    correct_prediction = tf.equal(tf.argmax(softmax1.y, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    init = tf.global_variables_initializer()
    s = tf.Session()

    # Scalar summaries for TensorBoard monitoring.
    bn1_scale_summary = tf.summary.scalar("bn1_scale", tf.reduce_mean(tf.square(bn1.scale)))
    bn1_shift_summary = tf.summary.scalar("bn1_shift", tf.reduce_sum(tf.square(bn1.shift)))
    accuracy_summary = tf.summary.scalar("accuracy", accuracy)
    loss_summary = tf.summary.scalar("loss", loss)

    writer = tf.summary.FileWriter('logs', s.graph, flush_secs=10)
    s.run(init)

    # Checkpointing: create the directory tree (FIX: os.makedirs replaces the
    # original three-step mkdir chain) and resume from the newest checkpoint.
    saver = tf.train.Saver()
    checkpoint_dir = os.path.join("model", "cnn", "MNIST_dataset")
    os.makedirs(checkpoint_dir, exist_ok=True)
    last_check_point = tf.train.latest_checkpoint(checkpoint_dir)
    if last_check_point is not None:
        print("加载：" + str(last_check_point))
        saver.restore(s, last_check_point)

    for i in range(train_num):
        next_bat = mnist.train.next_batch(bat_size)
        batch_image = next_bat[0].reshape([-1, 28, 28, 1])
        batch_label = next_bat[1]
        # Hoisted feed dicts: dropout active (0.5) for training/monitoring,
        # disabled (1.0) for evaluation — same values the original used.
        train_feed = {x: batch_image, y: batch_label, keep: 0.5}
        eval_feed = {x: batch_image, y: batch_label, keep: 1.0}

        s.run(train, feed_dict=train_feed)

        # PERF FIX: fetch all three keep=0.5 summaries in one session run
        # instead of three separate graph executions per step.
        loss_value, scale_value, shift_value = s.run(
            [loss_summary, bn1_scale_summary, bn1_shift_summary],
            feed_dict=train_feed)
        writer.add_summary(loss_value, i)
        writer.add_summary(scale_value, i)
        writer.add_summary(shift_value, i)

        # Accuracy is measured with dropout off, hence its own run.
        writer.add_summary(s.run(accuracy_summary, feed_dict=eval_feed), i)

        if i % 100 == 0:
            train_accuracy = s.run(accuracy, feed_dict=eval_feed)
            print("step %d, training accuracy %.2f" % (i, float(train_accuracy)))
        if i % 1000 == 0:
            saver.save(s, os.path.join(checkpoint_dir, "cnn"))


# Run training only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
