import tensorflow as tf
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import input_data
import math
from datetime import datetime
import time
import os
import mymodel
import alexnet

# test_dir = '/Users/new/Documents/JLIFE/project/HUAWEI-Server/scratchDL/FirstModel/pyscratch-500x400/data/test/'
# logs_train_dir = '/Users/new/Documents/JLIFE/project/HUAWEI-Server/scratchDL/FirstModel/pyscratch-500x400/logs/train/'
# Directory containing the test images (consumed by input_data.get_files).
test_dir = 'D:/scratch/test/testtest/'
# Directory holding training checkpoints to restore from.
logs_train_dir = 'E:/Projects/tensorflow/python/logs/train_learningrate1/'
# Frozen GraphDef used by evaluate_pb().
pb_file_path = 'E:/Projects/tensorflow/python/logs/train_learningrate1/expert-graph.pb'

IMG_W = 200        # input image width after resize
IMG_H = 200        # input image height after resize
BATCH_SIZE = 130   # evaluation batch size
NUM_CLASSES = 2    # binary task: normal (0) vs scratch (1)
N_TEST = 569       # total number of test samples
CAPACITY = 2000    # input-queue capacity for input_data.get_batch

def evaluate_ckpt():
    """Evaluate the trained AlexNet model on the test set from a checkpoint.

    Builds the queue-based input pipeline and the inference graph, restores
    the latest checkpoint found in ``logs_train_dir``, then runs
    ``N_TEST // BATCH_SIZE`` batches and prints the mean accuracy and the
    total evaluation time. Returns early if no checkpoint is available.
    """
    with tf.Graph().as_default():
        test_files, test_labels = input_data.get_files(test_dir)

        image_batch, label_batch = input_data.get_batch(test_files,
                                                        test_labels,
                                                        IMG_W,
                                                        IMG_H,
                                                        BATCH_SIZE,
                                                        CAPACITY)
        # Single-channel (grayscale) input; keep_prob=1.0 disables dropout
        # for evaluation.
        image = tf.reshape(image_batch, [BATCH_SIZE, IMG_W, IMG_H, 1])
        logit, _ = alexnet.inference(image, BATCH_SIZE, NUM_CLASSES, 1.0)
        correct = mymodel.evaluation(logit, label_batch)

        saver = tf.train.Saver()

        with tf.Session() as sess:
            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                # Checkpoint paths end in "-<global_step>".
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                # Evaluating uninitialized weights is meaningless; bail out.
                print('No checkpoint file found')
                return

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            try:
                print('\nEvaluating......')
                for var in tf.trainable_variables():
                    print(var.name)
                num_step = int(math.floor(N_TEST / BATCH_SIZE))
                step = 0
                total_acc = 0
                start_time = time.time()
                while step < num_step and not coord.should_stop():
                    tra_acc = sess.run(correct)
                    total_acc += np.sum(tra_acc)
                    step += 1
                duration = time.time() - start_time
                print('test accuracy = %.2f%%' % (total_acc / num_step * 100))
                print('duration: %.3f' % duration)
            except Exception as e:
                coord.request_stop(e)
            finally:
                coord.request_stop()
                coord.join(threads)


def get_one_image(train, size=(IMG_W, IMG_H)):
    """Randomly pick one image from a file list, display it, and return it.

    Args:
        train: list of image file paths.
        size: (width, height) to resize the image to. Defaults to the
            module-level model input size (previously hard-coded 200x200).

    Returns:
        ndarray holding the resized image pixels.
    """
    n = len(train)
    ind = np.random.randint(0, n)
    img_dir = train[ind]

    image = Image.open(img_dir)
    plt.imshow(image)
    plt.show()
    # NOTE(review): callers reshape the result to a single channel; this
    # presumably expects grayscale source images — verify, since an RGB file
    # would produce an (H, W, 3) array and break the downstream reshape.
    image = image.resize(size)
    image = np.array(image)
    return image

def evaluate_one_image():
    """Classify one randomly chosen test image with the checkpointed model.

    Picks an image via ``get_one_image``, builds a batch-of-one inference
    graph, restores the latest checkpoint, and prints the predicted class
    (normal vs scratch) with its score. Returns early if no checkpoint is
    available.
    """
    # you need to change the directories to yours.
    train_dir = 'D:/scratch/test/testtest/'
    train, train_label = input_data.get_files(train_dir)
    image_array = get_one_image(train)

    with tf.Graph().as_default():
        image = tf.cast(image_array, tf.float32)
        image = tf.reshape(image, [1, IMG_W, IMG_H, 1])
        # BUGFIX: batch size must be 1 here to match the [1, H, W, 1] input
        # (the original passed BATCH_SIZE=130). keep_prob=1.0 for inference.
        logit, _ = alexnet.inference(image, 1, NUM_CLASSES, 1.0)

        # you need to change the directories to yours.
        logs_train_dir = 'E:/Projects/tensorflow/python/logs/train_learningrate1/'

        saver = tf.train.Saver()

        with tf.Session() as sess:
            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                # No weights to run inference with; bail out.
                print('No checkpoint file found')
                return

            prediction = sess.run(logit)
            max_index = np.argmax(prediction)
            if max_index == 0:
                print('This is a normal with possibility %.6f' % prediction[:, 0])
            else:
                print('This is a scratch with possibility %.6f' % prediction[:, 1])

def evaluate_pb():
    """Evaluate the frozen-graph (.pb) model on the test set.

    Loads the serialized GraphDef from ``pb_file_path``, imports it into the
    current graph, feeds image batches through the imported "input:0" tensor,
    and accumulates accuracy from "output/output:0" over
    ``N_TEST // BATCH_SIZE`` batches.
    """
    with tf.Graph().as_default():
        output_graph_def = tf.GraphDef()
        train, train_label = input_data.get_files(test_dir)

        train_batch, train_label_batch = input_data.get_batch(train,
                                                              train_label,
                                                              IMG_W,
                                                              IMG_H,
                                                              BATCH_SIZE,
                                                              CAPACITY)

        # Deserialize the frozen graph and merge it into the default graph
        # (name="" keeps the original node names, e.g. "input", "output/output").
        with open(pb_file_path, "rb") as f:
            output_graph_def.ParseFromString(f.read())
            tf.import_graph_def(output_graph_def, name="")

        with tf.Session() as sess:
            input_x = sess.graph.get_tensor_by_name("input:0")
            print(input_x)
            output = sess.graph.get_tensor_by_name("output/output:0")
            print(output)
            correct = mymodel.evaluation(output, train_label_batch)

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            try:
                print('\nEvaluating......')
                for var in tf.trainable_variables():
                    print(var.name)
                num_step = int(math.floor(N_TEST / BATCH_SIZE))
                step = 0
                total_acc = 0
                start_time = time.time()
                while step < num_step and not coord.should_stop():
                    # Materialize a batch, then feed it to the imported graph.
                    tra_images = sess.run(train_batch)
                    tra_acc = sess.run(correct, feed_dict={input_x: tra_images})
                    print('accuracy = %.2f%%' % (tra_acc * 100))
                    total_acc += np.sum(tra_acc)
                    step += 1
                duration = time.time() - start_time
                print('test accuracy = %.2f%%' % (total_acc / num_step * 100))
                print('duration: %.3f' % duration)
            except Exception as e:
                coord.request_stop(e)
            finally:
                coord.request_stop()
                coord.join(threads)

# def evaluate1():
#     with tf.Graph().as_default():
#
#         test_images, test_labels, filename = get_files.get_files(test_dir)
#         images, labels, filenames = get_files.get_batch_test(test_images, test_labels, IMG_W, IMG_H, BATCH_SIZE,
#                                                              CAPACITY, filename)
#
#         logits, pool3 = easy.model_easy(images, BATCH_SIZE, NUM_CLASSES, 1.0)
#
#         labels = tf.cast(labels, tf.int64)
#         correct1 = tf.equal(tf.argmax(logits, 1), labels)
#         correct = lo.num_correct_prediction(logits, labels)
#         saver = tf.train.Saver(tf.global_variables())
#
#         with tf.Session() as sess:
#             print("Reading checkpoints...")
#             ckpt = tf.train.get_checkpoint_state(logs_train_dir)
#             if ckpt and ckpt.model_checkpoint_path:
#                 global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
#                 saver.restore(sess, ckpt.model_checkpoint_path)
#                 print('Loading success, global_step is %s' % global_step)
#             else:
#                 print('No checkpoint file found')
#                 return
#
#             coord = tf.train.Coordinator()
#             threads = tf.train.start_queue_runners(sess=sess, coord=coord)
#
#             try:
#                 print('\nEvaluating......')
#                 num_step = int(math.floor(N_TEST / BATCH_SIZE))
#                 num_sample = num_step * BATCH_SIZE
#                 step = 0
#                 total_correct = 0
#                 while step < num_step and not coord.should_stop():
#                     filenames = sess.run(filenames)
#                     img = sess.run(images)
#                     label = sess.run(labels)
#                     logit = sess.run(logits)
#                     correct2 = sess.run(correct1)
#
#                     for i in range(BATCH_SIZE):
#                         print(label[i])
#                         print(logit[i])
#                         #     #img[i] = img[i].reshape((IMG_W,IMG_H,3))
#                         #     # img[i] = Image.fromarray(img[i], "RGB")
#                         #
#                         #     # plt.imshow(img[i])
#                         #     # plt.show()
#                         #     print(filenames[i])
#                         print(correct2[i])
#
#                         # if correct2[i]:
#                         #     if label[i] == 0:
#                         #         f = open('./data/resultback/normal->right/normal->right.txt', 'a')
#                         #         f.write(str(filenames[i])+'\n')
#                         #         f.close()
#                         #     else:
#                         #         f = open('./data/resultback/scratch->right/scratch->right.txt', 'a')
#                         #         f.write(str(filenames[i])+'\n')
#                         #         f.close()
#                         # else:
#                         #     if label[i] == 0:
#                         #         f = open('./data/resultback/normal->wrong/normal->wrong.txt', 'a')
#                         #         f.write(str(filenames[i])+'\n')
#                         #         f.close()
#                         #     else:
#                         #         f = open('./data/resultback/scratch->wrong/scratch->wrong.txt', 'a')
#                         #         f.write(str(filenames[i])+'\n')
#                         #         f.close()
#
#                     batch_correct = sess.run(correct)
#                     total_correct += np.sum(batch_correct)
#                     step += 1
#
#                 print('Total testing samples: %d' % num_sample)
#                 print('Total correct predictions: %d' % total_correct)
#                 print('Average accuracy: %.2f%%' % (100 * total_correct / num_sample))
#             except Exception as e:
#                 coord.request_stop(e)
#             finally:
#                 coord.request_stop()
#                 coord.join(threads)
#
#



evaluate_pb()