import os
import numpy as np
import tensorflow as tf
import datetime
import random
from . import model, get_data
from hte.settings import MEDIA_ROOT

# you need to change the directories to yours.
# train_dir = 'img/'
# test_dir = 'test/'
#
#
# train_logs_dir = './logs/train/'
# val_logs_dir = './logs/val'
#
#
# N_CLASSES = 2
# IMG_W = 208  # resize the image, if the input image is too large, training will be very slow.
# IMG_H = 208
# RATIO = 0.2  # take 20% of dataset as validation data
# BATCH_SIZE = 64
# CAPACITY = 2000
# MAX_STEP = 5000  # with current parameters, it is suggested to use MAX_STEP>10k
# learning_rate = 0.0001  # with current parameters, it is suggested to use learning rate<0.0001


def pic_training(pic_list, classes):
    """Train the CNN classifier on the supplied per-class image path lists.

    Args:
        pic_list: per-class lists of image file paths (one inner list per
            class) — passed through to get_data.get_files.
        classes: class names aligned with pic_list.

    Returns:
        (result, model_dir, name_to_label) where:
          * result — dict with stringified 'train_loss', 'train_acc' and,
            after the final step, 'val_loss'/'val_acc';
          * model_dir — {'train_dir': ..., 'val_dir': ...} log directories
            generated for this run;
          * name_to_label — mapping produced by get_data.get_files.
    """
    # Fresh, unique log/checkpoint directories for this training run.
    train_logs_dir = general_dir('train')
    val_logs_dir = general_dir('val')
    model_dir = {
        'train_dir': train_logs_dir,
        'val_dir': val_logs_dir,
    }
    # Hyper-parameters for this run (small MAX_STEP/BATCH_SIZE — quick demo
    # settings; the commented header above suggests >10k steps for real use).
    IMG_W = 208  # resize the image, if the input image is too large, training will be very slow.
    IMG_H = 208
    RATIO = 0.2  # take 20% of dataset as validation data
    BATCH_SIZE = 10
    CAPACITY = 2000
    MAX_STEP = 100  # with current parameters, it is suggested to use MAX_STEP>10k
    learning_rate = 0.0001  #
    n_classes = len(pic_list)
    # Split file lists into train/val and build TF1 input queues.
    train, train_label, val, val_label, name_to_label = get_data.get_files(pic_list, classes)
    train_batch, train_label_batch = get_data.get_batch(train,
                                                        train_label,
                                                        IMG_W,
                                                        IMG_H,
                                                        BATCH_SIZE,
                                                        CAPACITY)
    val_batch, val_label_batch = get_data.get_batch(val,
                                                    val_label,
                                                    IMG_W,
                                                    IMG_H,
                                                    BATCH_SIZE,
                                                    CAPACITY)

    # The graph is wired directly to the training queue tensors.
    logits = model.inference(train_batch, BATCH_SIZE, n_classes)
    loss = model.losses(logits, train_label_batch)
    train_op = model.trainning(loss, learning_rate)
    acc = model.evaluation(logits, train_label_batch)

    # NOTE(review): these placeholders are never connected to the graph
    # built above (logits consume train_batch directly), so the feed_dicts
    # below have no effect. In particular the "validation" loss/accuracy
    # below is actually evaluated on a fresh TRAINING batch — confirm and
    # restructure if true validation metrics are needed.
    x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_W, IMG_H, 3])
    y_ = tf.placeholder(tf.int16, shape=[BATCH_SIZE])

    with tf.Session() as sess:
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())
        # Start the input-queue threads; they must be joined before exit.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        summary_op = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(train_logs_dir, sess.graph)
        val_writer = tf.summary.FileWriter(val_logs_dir, sess.graph)
        result = {}
        try:
            for step in np.arange(MAX_STEP):
                if coord.should_stop():
                    break
                tra_images, tra_labels = sess.run([train_batch, train_label_batch])
                _, tra_loss, tra_acc = sess.run([train_op, loss, acc],
                                                feed_dict={x: tra_images, y_: tra_labels})
                # Stringified so the dict is directly JSON-serializable.
                result['train_loss'] = str(tra_loss)
                result['train_acc'] = str(tra_acc)
                if step % 50 == 0:
                    print('Step %d, train loss = %.2f, train accuracy = %.2f%%' % (step, tra_loss, tra_acc * 100.0))
                    summary_str = sess.run(summary_op)
                    train_writer.add_summary(summary_str, step)

                if step % 200 == 0 or (step + 1) == MAX_STEP:
                    val_images, val_labels = sess.run([val_batch, val_label_batch])
                    val_loss, val_acc = sess.run([loss, acc],
                                                 feed_dict={x: val_images, y_: val_labels})
                    print('**  Step %d, val loss = %.2f, val accuracy = %.2f%%  **' % (step, val_loss, val_acc * 100.0))
                    summary_str = sess.run(summary_op)
                    val_writer.add_summary(summary_str, step)

                # Checkpoint periodically and always on the final step.
                if step % 2000 == 0 or (step + 1) == MAX_STEP:
                    checkpoint_path = os.path.join(train_logs_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)
                if (step + 1) == MAX_STEP:
                    print("*************************************")
                    print("All train steps done!")
                    val_images, val_labels = sess.run([val_batch, val_label_batch])
                    val_loss, val_acc = sess.run([loss, acc],
                                                 feed_dict={x: val_images, y_: val_labels})
                    result['val_loss'] = str(val_loss)
                    result['val_acc'] = str(val_acc)
        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            # Always stop the queue threads, even on error.
            coord.request_stop()
        coord.join(threads)
        return result, model_dir, name_to_label


def get_one_image(file_dir):
    """
    Load a single image file and resize it to the network input size.

    Args:
        file_dir: path to one image file.

    Return: ndarray of shape (208, 208, 3), dtype uint8.
    """
    from PIL import Image

    image = Image.open(file_dir)
    # Force 3 channels: a grayscale or RGBA source would otherwise produce
    # an array whose shape breaks the [1, 208, 208, 3] reshape performed
    # by the caller (test_images).
    image = image.convert('RGB')
    image = image.resize([208, 208])
    return np.array(image)


def img_to_list(file_dir):
    """Partition the files in *file_dir* into cat and dog path lists.

    A file whose name begins with 'cat.' goes into the first list,
    'dog.' into the second; anything else is ignored.

    Returns:
        [cat_paths, dog_paths] — two lists of full file paths.
    """
    buckets = {'cat': [], 'dog': []}
    for entry in os.listdir(file_dir):
        prefix = entry.split(sep='.')[0]
        if prefix in buckets:
            buckets[prefix].append(os.path.join(file_dir, entry))
    return [buckets['cat'], buckets['dog']]


def general_dir(name: str = 'train'):
    """Build a unique log-directory path under MEDIA_ROOT/<name>/Y/M/D/<token>.

    Generates a random 10-character token and retries until the resulting
    path does not already exist. The directory itself is NOT created here;
    the caller (e.g. tf.summary.FileWriter) is expected to create it.

    Args:
        name: top-level subdirectory ('train' or 'val').

    Returns:
        str: an absolute path that did not exist at the time of the check.
    """
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    now_time = datetime.datetime.now()
    # BUG FIX: the previous implementation retried via a recursive call
    # `general_dir()` that dropped `name`, so a collision under 'val'
    # silently fell back to a 'train' path. The loop keeps `name`.
    while True:
        r_str = ''.join(random.choice(alphabet) for _ in range(10))
        candidate = os.path.join(MEDIA_ROOT, name, str(now_time.year),
                                 str(now_time.month),
                                 str(now_time.day),
                                 r_str)
        if not os.path.exists(candidate):
            return candidate


# Test one image
def test_images(train_dir, test_dir, name_to_lable):
    """
    Test one image with the saved models and parameters
    """
    result = []
    # for pic_one in os.listdir(test_dir):
    #     pic = os.path.join(test_dir, pic_one)
    #     test_image = get_one_image(pic)
    for pic_one in test_dir:
        test_image = get_one_image(pic_one)

        with tf.Graph().as_default():
            BATCH_SIZE = 1
            N_CLASSES = len(name_to_lable)

            image = tf.cast(test_image, tf.float32)
            image = tf.image.per_image_standardization(image)
            image = tf.reshape(image, [1, 208, 208, 3])
            logit = model.inference(image, BATCH_SIZE, N_CLASSES)

            logit = tf.nn.softmax(logit)

            x = tf.placeholder(tf.float32, shape=[208, 208, 3])

            saver = tf.train.Saver()

            with tf.Session() as sess:

                # print("Reading checkpoints...")
                ckpt = tf.train.get_checkpoint_state(train_dir)
                if ckpt and ckpt.model_checkpoint_path:
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # print('Loading success, global_step is %s' % global_step)
                else:
                    # print('No checkpoint file found')
                    return result
                prediction = sess.run(logit, feed_dict={x: test_image})
                max_index = np.argmax(prediction)
                # print('This is a {0} with possibility {1:.2%}'.format(name_to_lable[max_index], prediction[:, 0][0]))
                result.append(name_to_lable[max_index])
    return result


if __name__ == '__main__':
    # Demo run against an existing checkpoint directory.
    # BUG FIX: test_images iterates its second argument as a sequence of
    # image *paths*; the old call passed the string './test', which would
    # have been iterated character by character. Build the path list.
    test_dir = './test'
    test_pics = [os.path.join(test_dir, f) for f in os.listdir(test_dir)]
    result = test_images('./train/2019/3/11/2tpSmLDtEO', test_pics, {0: 'cat', 1: 'dog'})
    print(result)
