from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf

from prepocess.utils import get_file, get_batch_tensor, BatchGenerator
from load_list_to_img_batch import path_list_to_img_batch


def case_use_data_set():
    """Train a softmax-regression MNIST classifier with an InteractiveSession.

    Reads (or downloads) the MNIST data set under ./MNIST, runs 1000 steps of
    mini-batch gradient descent (batch size 50, lr 0.01), then prints the
    test-set accuracy.
    """
    data_dir = "./MNIST"  # renamed: `file` shadows the (py2) builtin
    mnist = input_data.read_data_sets(data_dir, one_hot=True)

    x = tf.placeholder(tf.float32, shape=[None, 784])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    sess = tf.InteractiveSession()
    y = tf.nn.softmax(tf.matmul(x, W) + b)
    # Clip keeps log() away from log(0) = -inf; without it the loss turns
    # NaN as soon as the softmax saturates to exactly 0 for some class.
    cross_entropy = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))

    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
    # Initialize AFTER the optimizer is built so any slot variables it
    # creates are covered by the initializer as well.
    sess.run(tf.global_variables_initializer())
    for _ in range(1000):
        batch_xs, batch_ys = mnist.train.next_batch(50)
        train_step.run(feed_dict={x: batch_xs, y_: batch_ys})

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
    sess.close()  # InteractiveSession is not a context manager here; release it


def case_wrapper():
    """Train the same softmax MNIST model with an explicit Graph/Session pair.

    All ops — including the evaluation ops — are built once at
    graph-construction time; training runs 1000 steps and reports test-set
    accuracy every 100 steps.
    """
    data_dir = "./MNIST"
    mnist = input_data.read_data_sets(data_dir, one_hot=True)
    graph = tf.Graph()
    with graph.as_default():
        x = tf.placeholder(tf.float32, shape=[None, 784])
        y_ = tf.placeholder(tf.float32, shape=[None, 10])
        W = tf.Variable(tf.zeros([784, 10]))
        b = tf.Variable(tf.zeros([10]))
        y = tf.nn.softmax(tf.matmul(x, W) + b)
        # Clip so log(0) cannot turn the loss into NaN once softmax saturates.
        loss = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
        opt = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
        # Build the evaluation ops ONCE here. The previous version created
        # them inside the training loop, adding new nodes to the graph on
        # every report step (unbounded graph growth).
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        init = tf.global_variables_initializer()

    with tf.Session(graph=graph) as sess:
        sess.run(init)
        for i in range(1000):
            batch_xs, batch_ys = mnist.train.next_batch(50)
            tra_loss, _ = sess.run([loss, opt], feed_dict={x: batch_xs, y_: batch_ys})

            if i % 100 == 0:
                test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images,
                                                         y_: mnist.test.labels})
                print("step:{} ,loss:{} , acc:{}".format(i, tra_loss, test_acc))


def case_use_pic():
    """Train the softmax MNIST model from image files on disk.

    Gets path/label lists via get_file, decodes image batches inside the
    graph through path_list_to_img_batch, trains with mini-batch gradient
    descent, and reports a validation accuracy every 3rd epoch.
    """
    BATCH_SIZE = 32
    CLASS_NUM = 10
    WIDTH = 28
    HEIGHT = 28
    CHANNEL = 1
    train, train_label, val, val_label, class_names_to_ids, ids_to_class_names = get_file("../MNIST_lit")
    train_data_provider = BatchGenerator(train, train_label, BATCH_SIZE, CLASS_NUM)
    # NOTE(review): validation currently reuses the TRAINING split (the real
    # val/val_label provider is commented out) — confirm this is intentional.
    # val_data_provider = BatchGenerator(val, val_label, BATCH_SIZE, CLASS_NUM)
    val_data_provider = BatchGenerator(train, train_label, BATCH_SIZE, CLASS_NUM)

    graph = tf.Graph()
    with graph.as_default():
        train_img_placeholder = tf.placeholder(tf.string, shape=[None, ])
        train_label_placeholder = tf.placeholder(tf.int32, shape=[None, CLASS_NUM])
        cur_batch_size = tf.shape(train_img_placeholder)[0]

        img_batch = path_list_to_img_batch(train_img_placeholder, WIDTH, HEIGHT, CHANNEL)

        x = tf.reshape(img_batch, [cur_batch_size, 784])
        x = tf.cast(x, dtype=tf.float32)
        y_ = tf.cast(train_label_placeholder, dtype=tf.float32)

        W = tf.Variable(tf.zeros([784, 10]))
        b = tf.Variable(tf.zeros([10]))
        y = tf.nn.softmax(tf.matmul(x, W) + b)
        # Clip so log(0) cannot turn the loss into NaN.
        loss = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
        opt = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
        # Build the evaluation ops ONCE. The previous version re-created
        # them for every validation batch inside the inner while-loop,
        # growing the graph without bound.
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        init = tf.global_variables_initializer()

    with tf.Session(graph=graph) as sess:
        # Create the coordinator BEFORE the try-block: if startup raises,
        # the finally clause must not hit a NameError on `coord`.
        coord = tf.train.Coordinator()  # multi-thread coordinator
        try:
            sess.run(init)
            tf.train.start_queue_runners(sess=sess, coord=coord)
            print("session started ...")
            epoch = 0
            last_epoch = 0
            while epoch < 1000:
                train_batch = train_data_provider.next_batch()
                tra_loss, _ = sess.run([loss, opt],
                                       feed_dict={train_img_placeholder: train_batch[0],
                                                  train_label_placeholder: train_batch[1]})
                epoch = train_batch[2]
                if epoch % 3 == 0 and last_epoch != epoch:
                    # Arithmetic mean of batch accuracies over one full pass
                    # of the validation provider (until its epoch counter
                    # advances). The old pairwise (a+b)/2 smoothing was not
                    # a true mean and over-weighted later batches.
                    acc_sum = 0.0
                    batch_cnt = 0
                    start_epoch = None
                    while True:
                        val_batch = val_data_provider.next_batch()
                        val_epoch = val_batch[2]
                        if start_epoch is None:
                            start_epoch = val_epoch
                        if start_epoch != val_epoch:
                            break  # provider wrapped around: pass complete
                        acc_sum += sess.run(accuracy,
                                            feed_dict={train_img_placeholder: val_batch[0],
                                                       train_label_placeholder: val_batch[1]})
                        batch_cnt += 1
                    out_acc = acc_sum / batch_cnt if batch_cnt else 0.0
                    print("epoch:{} ,loss:{}, acc:{}".format(epoch, tra_loss, out_acc))
                last_epoch = epoch
        except Exception as ex:
            # Best-effort experimental script: report and fall through to
            # the coordinator shutdown instead of crashing mid-training.
            print(ex)
        finally:
            coord.request_stop()
            coord.join()
        # `with tf.Session(...)` closes the session on exit; no sess.close().


if __name__ == '__main__':
    # Script entry point: run the file-based image pipeline demo.
    # The other two demos can be enabled instead:
    # case_use_data_set()
    # case_wrapper()
    case_use_pic()
