import time
from matplotlib import pyplot as plt
import tensorflow as tf
import numpy as np
import pickle
from model import VisionTransformer
import os
# Pin the process to GPU 1.
# NOTE(review): CUDA_VISIBLE_DEVICES is conventionally set BEFORE importing
# tensorflow — confirm TF has not already initialised the CUDA context here.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
filename = "./dataset/train"      # training split pickle (presumably CIFAR-100 python format)
val_filename = "./dataset/test"   # test split pickle (presumably CIFAR-100 python format)


def set_seed(seed=9527):
    """Seed the NumPy and TensorFlow RNGs so runs are repeatable.

    Args:
        seed: integer seed applied to both libraries (default 9527).
    """
    np.random.seed(seed)
    # Fix: the rest of the file uses the tf.compat.v1 namespace; the bare
    # tf.set_random_seed alias does not exist under TensorFlow 2.x.
    tf.compat.v1.set_random_seed(seed)


# Seed all RNGs once at import time for reproducibility.
set_seed(9527)

#num2lable= [b'apple', b'aquarium_fish', b'baby', b'bear', b'beaver', b'bed', b'bee', b'beetle', b'bicycle', b'bottle', b'bowl', b'boy', b'bridge', b'bus', b'butterfly', b'camel', b'can', b'castle', b'caterpillar', b'cattle', b'chair', b'chimpanzee', b'clock', b'cloud', b'cockroach', b'couch', b'crab', b'crocodile', b'cup', b'dinosaur', b'dolphin', b'elephant', b'flatfish', b'forest', b'fox', b'girl', b'hamster', b'house', b'kangaroo', b'keyboard', b'lamp', b'lawn_mower', b'leopard', b'lion', b'lizard', b'lobster', b'man', b'maple_tree', b'motorcycle', b'mountain', b'mouse', b'mushroom', b'oak_tree', b'orange', b'orchid', b'otter', b'palm_tree', b'pear', b'pickup_truck', b'pine_tree', b'plain', b'plate', b'poppy', b'porcupine', b'possum', b'rabbit', b'raccoon', b'ray', b'road', b'rocket', b'rose', b'sea', b'seal', b'shark', b'shrew', b'skunk', b'skyscraper', b'snail', b'snake', b'spider', b'squirrel', b'streetcar', b'sunflower', b'sweet_pepper', b'table', b'tank', b'telephone', b'television', b'tiger', b'tractor', b'train', b'trout', b'tulip', b'turtle', b'wardrobe', b'whale', b'willow_tree', b'wolf', b'woman', b'worm']

# Training hyper-parameters.
HPARAMS = {
    "batch_size": 320,        # samples per SGD step
    'learning_rate': 0.0001,  # Adam base learning rate
    "weight_decay": 0.5,      # multiplier on the 'losses'-collection term (zeroed when "l2" is False)
    "l2": True,               # enable the weight-decay term
    "delay_validation": 4,    # validation runs on epochs where epoch % 4 == 2
    "train_img_N": 50000,     # training-set size (CIFAR-100)
    "test_img_N": 10000,      # test-set size (CIFAR-100)
    "num_classes": 100,       # CIFAR-100 fine labels
    'delay_save': 3           # checkpoint every 3rd epoch (including epoch 0)
}

# Reference ViT-Base configuration.
# NOTE(review): this dict appears unused — the model further down is built
# with hard-coded, much smaller hyper-parameters. Confirm which is intended.
VIT_BASE = {
    "patch_size": 16,
    "num_layers": 12,
    "num_classes": HPARAMS["num_classes"],
    "d_model": 768,
    "num_heads": 12,
    "mlp_dim": 3072,
    "channels": 3,
    "dropout": 0.1
}


def _augment(image, label):
    """Randomly augment one image (flip left/right, flip up/down, contrast).

    BUG FIX: the original called np.random.rand() inside the mapped function.
    tf.data traces the function once in graph mode, so each coin flip was
    evaluated a single time at trace time and then frozen for every image.
    Using tf.cond with a tf.random.uniform draw keeps the intended 30%
    probability but evaluates it per element at runtime.

    Args:
        image: image tensor (passed through unchanged when no op fires).
        label: label, returned untouched.

    Returns:
        (possibly-augmented image, label) tuple.
    """
    def _maybe(prob, fn, x):
        # Apply `fn` to `x` with probability `prob`, decided at runtime.
        return tf.cond(tf.random.uniform([]) < prob, lambda: fn(x), lambda: x)

    image = _maybe(0.3, tf.image.flip_left_right, image)
    image = _maybe(0.3, tf.image.flip_up_down, image)
    image = _maybe(0.3, lambda x: tf.image.random_contrast(x, lower=0.5, upper=2), image)
    return image, label


def _preprocess(image, label):
    image = image / 255.0
    return image, label


def read_train_data(filename, training):
    """Build an infinite, shuffled, batched training pipeline from a pickle.

    Args:
        filename: path to a CIFAR-100-style pickle holding b'data'
            (N x 3072 uint8 rows) and b'fine_labels'.
        training: kept for interface compatibility; the image count is now
            inferred from the data itself instead of HPARAMS sizes.

    Returns:
        (image_batch, label_batch) tensors from a one-shot iterator. Images
        are NHWC float32, scaled to [0, 1] by _preprocess.
    """
    with open(filename, 'rb') as fo:
        # NOTE: pickle.load is unsafe on untrusted input; only local data here.
        data = pickle.load(fo, encoding='bytes')
    # -1 infers the sample count, replacing the hard-coded train/test sizes
    # (and no longer shadowing the builtin `dict`).
    images = data[b'data'].reshape([-1, 3, 32, 32])
    images = np.transpose(images, [0, 2, 3, 1])  # NCHW -> NHWC
    images = images.astype(np.float32)
    labels = np.array(data[b'fine_labels'])

    ds = tf.data.Dataset.from_tensor_slices((images, labels))
    # ds = ds.map(_augment, num_parallel_calls=4)
    ds = ds.map(_preprocess, num_parallel_calls=4)
    ds = ds.shuffle(HPARAMS['batch_size'] * 10)
    ds = ds.repeat()  # infinite stream; the caller bounds steps per epoch
    ds = ds.batch(batch_size=HPARAMS['batch_size'], drop_remainder=True)
    iterator = tf.compat.v1.data.make_one_shot_iterator(ds)
    image_batch, label_batch = iterator.get_next()
    print("wok on it =================================")
    return image_batch, label_batch


def read_eval_data(filename, training):
    """Build an infinite, batched (unshuffled) evaluation pipeline from a pickle.

    Args:
        filename: path to a CIFAR-100-style pickle holding b'data'
            (N x 3072 uint8 rows) and b'fine_labels'.
        training: kept for interface compatibility; the image count is now
            inferred from the data itself instead of HPARAMS sizes.

    Returns:
        (image_batch, label_batch) tensors from a one-shot iterator. Images
        are NHWC float32, scaled to [0, 1] by _preprocess. No shuffling or
        augmentation, so evaluation order is deterministic.
    """
    with open(filename, 'rb') as fo:
        # NOTE: pickle.load is unsafe on untrusted input; only local data here.
        data = pickle.load(fo, encoding='bytes')
    # -1 infers the sample count, replacing the hard-coded train/test sizes
    # (and no longer shadowing the builtin `dict`).
    images = data[b'data'].reshape([-1, 3, 32, 32])
    images = np.transpose(images, [0, 2, 3, 1])  # NCHW -> NHWC
    images = images.astype(np.float32)
    labels = np.array(data[b'fine_labels'])

    ds = tf.data.Dataset.from_tensor_slices((images, labels))
    ds = ds.map(_preprocess, num_parallel_calls=4)
    ds = ds.repeat()  # infinite stream; the caller bounds steps per pass
    ds = ds.batch(batch_size=HPARAMS['batch_size'], drop_remainder=True)
    iterator = tf.compat.v1.data.make_one_shot_iterator(ds)
    image_batch, label_batch = iterator.get_next()
    print("wok on it =================================")
    return image_batch, label_batch


# Build a small ViT for 32x32 inputs: 4x4 patches give an 8x8 token grid.
# NOTE(review): these values ignore the VIT_BASE dict defined above
# (d_model 64 vs 768, mlp_dim 128 vs 3072) — confirm which config is intended.
model = VisionTransformer(
    image_size=32,    # input resolution (CIFAR-sized)
    patch_size=4,     # 4x4 patches -> 64 tokens per image
    num_layers=12,
    num_classes=100,  # CIFAR-100 fine labels
    d_model=64,
    num_heads=4,
    mlp_dim=128,
    channels=3,
    dropout=0.1
)


def calc(prediction, label):
    """Return the fraction of positions where prediction matches label.

    Args:
        prediction: sequence of predicted class ids.
        label: sequence of ground-truth class ids, same length.

    Returns:
        Accuracy in [0, 1] as a float; 0.0 for empty input (the original
        raised ZeroDivisionError there).

    Raises:
        ValueError: if the two sequences differ in length (was a bare
            `assert`, which is silently stripped under ``python -O``).
    """
    if len(prediction) != len(label):
        raise ValueError("prediction and label must have the same length")
    if not prediction:
        return 0.0
    hits = sum(p == t for p, t in zip(prediction, label))
    return hits / len(prediction)


def eval(pred, label):
    """Top-1 accuracy of logits `pred` (batch, classes) against `label` ids.

    NOTE: the name shadows the builtin `eval`; kept for caller compatibility.
    """
    predicted_classes = np.argmax(pred, 1).tolist()
    # Inlined accuracy computation (previously delegated to calc()).
    assert len(predicted_classes) == len(label)
    hits = [p == t for p, t in zip(predicted_classes, label)]
    return sum(hits) / len(hits)

# Cross-entropy over integer class ids; the model emits raw logits.
loss_fun = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)


# Graph-mode feed placeholders. Batches are fetched from the tf.data
# iterators with sess.run and then re-fed through these placeholders
# (an extra round-trip through Python; NOTE(review): the iterator tensors
# could be wired into the model directly).
inputx = tf.compat.v1.placeholder(
    tf.float32, shape=[HPARAMS['batch_size'], 32, 32, 3], name="inputx")
inputy = tf.compat.v1.placeholder(
    tf.int64, shape=[HPARAMS['batch_size'], ],  name="inputy")
inputTrain = tf.compat.v1.placeholder(
    tf.bool, name='training')


# One-shot iterators over the infinite train/eval pipelines.
images_batch, labels_batch = read_train_data(filename=filename, training=True)
val_image_batch, val_labels_batch = read_eval_data(
    filename=val_filename, training=False)
# Forward pass; `training` presumably toggles dropout in the model — confirm.
out = model(inputx, training=inputTrain)


# Disable the weight-decay term entirely when L2 regularisation is off.
HPARAMS['weight_decay'] = HPARAMS['weight_decay'] if HPARAMS['l2'] else 0.

cls_loss = loss_fun(inputy, out)
# NOTE(review): cls_loss is added to the 'losses' collection, so l2_loss
# below includes the classification loss as well as any regularisation
# losses the model registered. The total is therefore
#   cls_loss + (cls_loss + reg_losses) * weight_decay
# i.e. the classification loss is counted inside the "l2" term too —
# confirm this is intended (behavior preserved here).
# Fix applied below: the file mixed bare TF1 aliases (tf.add_to_collection,
# tf.train.AdamOptimizer, tf.Session, ...) with tf.compat.v1; unified on
# tf.compat.v1 so the script also loads under TensorFlow 2.x.
tf.compat.v1.add_to_collection('losses', cls_loss)
l2_loss = tf.add_n(tf.compat.v1.get_collection('losses'))
loss = cls_loss + l2_loss * HPARAMS['weight_decay']


# Adam in graph mode; minimize() creates the training op.
optimizer = tf.compat.v1.train.AdamOptimizer(
    learning_rate=HPARAMS['learning_rate'], beta1=0.9)
train_op = optimizer.minimize(loss)
# update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)

# allow_soft_placement lets TF fall back to CPU for ops without a GPU kernel.
config = tf.compat.v1.ConfigProto(allow_soft_placement=True)
sess = tf.compat.v1.Session(config=config)
sess.run(tf.compat.v1.global_variables_initializer())
saver = tf.compat.v1.train.Saver(max_to_keep=10)
# saver.restore(sess, "./modeltest/vit-base-9")
# Main loop: 3000 epochs of feed-dict training with periodic checkpointing
# and validation.
for epoch in range(0,3000):
    # train
    label_col = []  # ground-truth labels accumulated over the epoch
    pred_col = []   # argmax predictions accumulated over the epoch
    for step in range(HPARAMS['train_img_N'] // HPARAMS['batch_size']):
        s = time.time()
        # Pull one batch from the tf.data pipeline into Python ...
        # (inputTrain in the feed_dict is unused by the iterator ops — harmless)
        x_in, y_in = sess.run([images_batch, labels_batch],
                              feed_dict={inputTrain: True})
        # ... then feed it back through the placeholders for one train step.
        out_, loss_, _ = sess.run([out, loss, train_op], feed_dict={
            inputx: x_in, inputy: y_in, inputTrain: True})
        label_col += y_in.tolist()
        pred_col += np.argmax(out_, 1).tolist()
        if step % 100 == 0:
            # Progress every 100 steps: loss, wall time and this batch's accuracy.
            print("epoch:{}  step: {} , loss:  {:.4f} ,  time: {:.4f}  acc: {:.4f}".format(
                epoch, step, loss_.item(), time.time()-s, eval(out_, y_in)))
    print("Train ACC: {:.4f}".format(calc(pred_col, label_col)))
    # Checkpoint on epochs 0, 3, 6, ...
    if epoch % HPARAMS['delay_save'] == 0:
        saver.save(sess, "./modeltest/vit-base", global_step=epoch)

    # Validate on epochs 2, 6, 10, ... (epoch % delay_validation == 2).
    if epoch % HPARAMS['delay_validation'] == 2:
        # eval
        prediction = []
        labels = []
        for step in range(HPARAMS['test_img_N'] // HPARAMS['batch_size']):
            val_x_in, val_y_in = sess.run(
                [val_image_batch, val_labels_batch], feed_dict={inputTrain: False})
            # Loss is fetched but discarded; only predictions are kept.
            pred, _ = sess.run([out, loss], feed_dict={
                inputx: val_x_in, inputy: val_y_in, inputTrain: False})
            prediction += np.argmax(pred, 1).tolist()
            labels += val_y_in.tolist()
        print("Test ACC:  ", calc(prediction, labels))