import time
import tensorflow as tf
import numpy as np
import pickle
# from model import VisionTransformer
import os


# Pin TensorFlow to the second GPU; must be set before any session/device init.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
# CIFAR-100 pickle batches (python-pickle format) — paths assumed; TODO confirm.
filename = "./dataset/train"
val_filename = "./dataset/test"
np.random.seed(9527)  # fix numpy RNG for reproducibility

# Training hyper-parameters shared across the script.
HPARAMS = {
    "batch_size": 320,
    'learning_rate': 0.0001,
}

# ViT-Base configuration; currently unused while the VisionTransformer
# import below is commented out — kept for reference.
VIT_BASE = {
    "patch_size": 16,
    "num_layers": 12,
    "num_classes": 100,
    "d_model": 768,
    "num_heads": 12,
    "mlp_dim": 3072,
    "channels": 3,
    "dropout": 0.1
}


def read_data(filename, training):
    """Load a CIFAR-100 pickle batch and build an input pipeline.

    Args:
        filename: path to a CIFAR-100 python-pickle batch file.
        training: if True, apply random augmentation and shuffle.

    Returns:
        (image_batch, label_batch) tensors from a one-shot iterator;
        images are float32 NHWC in [0, 1], labels are fine-label ids.
    """
    with open(filename, 'rb') as fo:
        # NOTE(security): pickle.load can execute arbitrary code — only
        # use with trusted dataset files.
        batch = pickle.load(fo, encoding='bytes')
    # -1 infers the sample count (50000 train / 10000 test) instead of
    # hard-coding it, so any batch size of file works.
    images = batch[b'data'].reshape([-1, 3, 32, 32])
    images = np.transpose(images, [0, 2, 3, 1])  # NCHW -> NHWC
    images = images.astype(np.float32)
    labels = np.array(batch[b'fine_labels'])

    def _augment(image, label):
        # Each transform is applied with probability 0.3, decided by TF
        # random ops so a fresh draw happens per image at run time.
        # (The previous np.random.rand() calls ran once at graph-trace
        # time, freezing the augmentation decision for the whole run.)
        def _maybe(fn, img):
            return tf.cond(tf.random.uniform([]) < 0.3,
                           lambda: fn(img), lambda: img)

        image = _maybe(tf.image.flip_left_right, image)
        image = _maybe(tf.image.flip_up_down, image)
        image = _maybe(
            lambda img: tf.image.random_contrast(img, lower=0.5, upper=2),
            image)
        return image, label

    def _preprocess(image, label):
        # Scale raw 0-255 pixel values into [0, 1].
        image = image / 255.0
        return image, label

    ds = tf.data.Dataset.from_tensor_slices((images, labels))
    if training:
        ds = ds.map(_augment)
        ds = ds.map(_preprocess)
        ds = ds.shuffle(HPARAMS['batch_size'] * 10)
        ds = ds.repeat()
    else:
        ds = ds.map(_preprocess)
        ds = ds.repeat()

    # drop_remainder keeps batches at the fixed placeholder shape.
    ds = ds.batch(batch_size=HPARAMS['batch_size'], drop_remainder=True)
    iterator = tf.compat.v1.data.make_one_shot_iterator(ds)
    image_batch, label_batch = iterator.get_next()
    return image_batch, label_batch


def simple_model(features, training):
    """Small CNN classifier producing CIFAR-100 logits.

    Args:
        features: float32 image batch, NHWC (e.g. [B, 32, 32, 3]).
        training: scalar bool tensor; enables dropout when True.

    Returns:
        [B, 100] unnormalized class logits.
    """
    params = {"drop_rate": 0.5, "num_classes": 100, }
    for i, filters in enumerate([32, 64, 128]):
        # ReLU gives the conv stack a nonlinearity; without it the
        # stacked convolutions between poolings collapse into one
        # linear map.
        features = tf.layers.conv2d(
            features, filters=filters, kernel_size=3, padding="same",
            activation=tf.nn.relu, name="conv_%d" % (i + 1))
        features = tf.layers.max_pooling2d(
            inputs=features, pool_size=2, strides=2, padding="same",
            name="pool_%d" % (i + 1))

    features = tf.contrib.layers.flatten(features)

    # tf.layers.dropout natively gates on `training`; this replaces the
    # previous tf.cond on the rate value.
    features = tf.layers.dropout(features, rate=params['drop_rate'],
                                 training=training)
    features = tf.layers.dense(features, 512, activation=tf.nn.relu,
                               name="dense_1")

    features = tf.layers.dropout(features, rate=params['drop_rate'],
                                 training=training)
    logits = tf.layers.dense(features, params['num_classes'], activation=None,
                             name="dense_2")

    return logits


# Active classifier. To train ViT-Base instead, re-enable the VisionTransformer
# import at the top of the file and swap in the block below.
model = simple_model
# model = VisionTransformer(
#     image_size=32,
#     patch_size=16,
#     num_layers=12,
#     num_classes=100,
#     d_model=768,
#     num_heads=12,
#     mlp_dim=3072,
#     channels=3,
#     dropout=0.1
# )


def calc(prediction, label):
    """Return the fraction of positions where prediction equals label.

    Args:
        prediction: sequence of predicted class ids.
        label: sequence of ground-truth class ids, same length.

    Returns:
        Accuracy in [0, 1]; 0.0 for empty input instead of raising
        ZeroDivisionError as the previous version did.
    """
    if not len(prediction):
        return 0.0
    matches = [p == t for p, t in zip(prediction, label)]
    return sum(matches) / len(matches)


def eval(pred, label):
    """Top-1 accuracy of logit/probability matrix `pred` against `label`.

    NOTE: shadows the builtin `eval`; kept for caller compatibility.
    """
    top1 = np.argmax(pred, axis=1)
    return calc(top1.tolist(), label)


# Sparse cross-entropy over integer class ids; the model emits raw logits
# (from_logits=True), so no softmax layer is needed in the model.
loss_fun = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)


# Feed-dict placeholders: fixed-size image batch (NHWC), integer labels,
# and a boolean switch that toggles dropout between train and eval.
inputx = tf.compat.v1.placeholder(
    tf.float32, shape=[HPARAMS['batch_size'], 32, 32, 3], name="inputx")
inputy = tf.compat.v1.placeholder(
    tf.int64, shape=[HPARAMS['batch_size'], ],  name="inputy")
inputTrain = tf.compat.v1.placeholder(
    tf.bool, name='training')


# ---- graph construction -------------------------------------------------
images_batch, labels_batch = read_data(filename=filename, training=True)
val_image_batch, val_labels_batch = read_data(
    filename=val_filename, training=False)
out = model(inputx, training=inputTrain)
loss = loss_fun(inputy, out)
optimizer = tf.train.AdamOptimizer(
    learning_rate=HPARAMS['learning_rate'], beta1=0.9)
train_op = optimizer.minimize(loss)
# NOTE(review): collected but never run; harmless for the current model
# (no batch norm) — group with train_op if normalization layers are added.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

# ---- session setup ------------------------------------------------------
config = tf.ConfigProto(allow_soft_placement=True)
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(max_to_keep=10)
# NOTE(review): restores a hard-coded checkpoint and raises if it is
# missing — comment out (or guard) for a fresh training run.
saver.restore(sess, "./modeltest/vit-base-23")

TRAIN_SIZE = 50000  # CIFAR-100 train split
TEST_SIZE = 10000   # CIFAR-100 test split

for epoch in range(3000):
    # ---- train one epoch ----
    label_col = []
    pred_col = []
    for step in range(TRAIN_SIZE // HPARAMS['batch_size']):
        s = time.time()
        x_in, y_in = sess.run([images_batch, labels_batch],
                              feed_dict={inputTrain: True})
        out_, loss_, _ = sess.run([out, loss, train_op], feed_dict={
            inputx: x_in, inputy: y_in, inputTrain: True})
        label_col += y_in.tolist()
        pred_col += np.argmax(out_, 1).tolist()
        if step % 10 == 0:
            print("epoch:{}  step: {} , loss:  {:.4f} ,  time: {:.4f}  acc: {:.4f}".format(
                epoch, step, loss_.item(), time.time()-s, eval(out_, y_in)))
    print("Train ACC: {:.4f}".format(calc(pred_col, label_col)))
    # saver.save(sess, "./modeltest/vit-base", global_step=epoch)
    # ---- evaluate every 10 epochs ----
    if epoch % 10 == 9:
        prediction = []
        labels = []
        # BUG FIX: was 100000 // batch_size, which cycled the repeated
        # 10k-sample test set roughly ten times per evaluation.
        for step in range(TEST_SIZE // HPARAMS['batch_size']):
            x_in, y_in = sess.run(
                [val_image_batch, val_labels_batch], feed_dict={inputTrain: False})
            pred, loss_ = sess.run([out, loss], feed_dict={
                                inputx: x_in, inputy: y_in, inputTrain: False})
            prediction += np.argmax(pred, 1).tolist()
            labels += y_in.tolist()
        print("Test ACC:  ", calc(prediction, labels))