import time
import tensorflow as tf
import numpy as np
import pickle
from model import VisionTransformer
import matplotlib.pyplot as plt

import warnings
warnings.filterwarnings("ignore")
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
# Path to the CIFAR-100 test batch (python pickle format).
filename = "./dataset/test"
# Run-level hyperparameters. 'training' is False: this script only evaluates.
HPARAMS = {
    "batch_size": 320,
    'learning_rate': 0.0001,
    "image_N": 10000,        # number of images in the test pickle
    'training': False
}

# ViT-Base architecture configuration (mirrors the values passed to
# VisionTransformer below).
VIT_BASE = {
    "patch_size": 16,
    "num_layers": 12,
    "num_classes": 100,      # CIFAR-100 fine labels
    "d_model": 768,
    "num_heads": 12,
    "mlp_dim": 3072,
    "channels": 3,
    "dropout": 0.1
}


def read_data(filename, training):
    """Build a batched input pipeline from a CIFAR-100 pickle batch.

    Args:
        filename: path to a CIFAR-style pickle file with b'data' and
            b'fine_labels' entries.
        training: when True, shuffle, repeat indefinitely and apply random
            augmentation; when False, make a single un-augmented pass.

    Returns:
        (image_batch, label_batch) tensors from a one-shot iterator; images
        are float32 in [0, 1] with shape [batch_size, 32, 32, 3].
    """
    # NOTE(review): pickle.load is only safe because the dataset file is a
    # trusted local artifact — never point this at untrusted input.
    with open(filename, 'rb') as fo:
        batch = pickle.load(fo, encoding='bytes')
    images = batch[b'data'].reshape([HPARAMS['image_N'], 3, 32, 32])
    images = np.transpose(images, [0, 2, 3, 1])  # NCHW -> NHWC
    images = images.astype(np.float32)
    labels = np.array(batch[b'fine_labels'])

    def _augment(image, label):
        # Bug fix: the original used np.random.rand() here, which runs once
        # at graph-trace time inside Dataset.map, freezing a single random
        # choice for every element. tf.cond + tf.random.uniform draws a new
        # decision per element at run time.
        def _maybe(op, img):
            return tf.cond(tf.random.uniform([]) < 0.3,
                           lambda: op(img),
                           lambda: img)

        image = _maybe(tf.image.flip_left_right, image)
        image = _maybe(tf.image.flip_up_down, image)
        image = _maybe(
            lambda img: tf.image.random_contrast(img, lower=0.5, upper=2),
            image)
        return image, label

    def _preprocess(image, label):
        # Scale uint8-range pixel values into [0, 1].
        image = image / 255.0
        return image, label

    ds = tf.data.Dataset.from_tensor_slices((images, labels))
    if training:
        ds = ds.map(_augment)
        ds = ds.map(_preprocess)
        ds = ds.shuffle(HPARAMS['batch_size'] * 10)
        ds = ds.repeat()
    else:
        # Bug fix: evaluation data must not be randomly augmented — the
        # original applied _augment here too, corrupting test accuracy.
        ds = ds.map(_preprocess)
    ds = ds.batch(batch_size=HPARAMS['batch_size'], drop_remainder=True)
    iterator = tf.compat.v1.data.make_one_shot_iterator(ds)
    image_batch, label_batch = iterator.get_next()
    return image_batch, label_batch


# Build the ViT-Base classifier. Consistency fix: construct it from the
# single VIT_BASE config dict instead of duplicating every hyperparameter
# literal here (the two previously had to be kept in sync by hand).
model = VisionTransformer(image_size=32, **VIT_BASE)


# Loss over raw logits (the model's head emits unnormalized class scores).
loss_fun = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)


# Input pipeline tensors for the evaluation pass.
images_batch, labels_batch = read_data(
    filename=filename, training=HPARAMS['training'])

# Placeholders decouple graph construction from the pipeline, so any numpy
# batch of the right shape can be fed.
inputx = tf.compat.v1.placeholder(
    tf.float32, shape=[HPARAMS['batch_size'], 32, 32, 3], name="inputx")
inputy = tf.compat.v1.placeholder(
    tf.int64, shape=[HPARAMS['batch_size'], ],  name="inputy")

out = model(inputx, training=HPARAMS['training'])
loss = loss_fun(inputy, out)

# Consistency fix: use the tf.compat.v1 namespace throughout — the rest of
# the file already does, and the bare tf.ConfigProto / tf.Session /
# tf.train.Saver / tf.global_variables_initializer names no longer exist
# under TensorFlow 2.x.
config = tf.compat.v1.ConfigProto(allow_soft_placement=True)
sess = tf.compat.v1.Session(config=config)  # session
saver = tf.compat.v1.train.Saver()
sess.run(tf.compat.v1.global_variables_initializer())
# Restore the pretrained checkpoint (overwrites the fresh initialization).
saver.restore(sess, "./model4/vit-base-63")
prediction = []
labels = []

def visual(x_in, y_in, step, info):
    """Save an 8x8 grid of images with their labels as titles.

    Args:
        x_in: indexable collection of at least 64 images.
        y_in: matching labels used as subplot titles.
        step: integer used as the output file name.
        info: prefix for the output directory ("{info}-images/").
    """
    # Bug fix: savefig fails if the target directory does not exist.
    out_dir = "{}-images".format(info)
    os.makedirs(out_dir, exist_ok=True)
    fig = plt.figure(figsize=(4, 4))
    for i in range(8 * 8):
        plt.subplot(8, 8, i + 1)
        plt.imshow(x_in[i])
        plt.title(y_in[i])
        plt.axis('off')
    plt.savefig("{}/{}.png".format(out_dir, step))
    # Bug fix: close the figure; the original leaked one figure per call,
    # accumulating memory when invoked every evaluation step.
    plt.close(fig)

def calc(prediction, label):
    """Return the fraction of positions where prediction equals label.

    Args:
        prediction: sequence of predicted values.
        label: sequence of ground-truth values (compared element-wise).

    Returns:
        Accuracy in [0.0, 1.0]; 0.0 for empty input (the original raised
        ZeroDivisionError).
    """
    if not prediction:
        return 0.0
    # zip pairs the sequences directly instead of indexing via range(len()).
    return sum(p == t for p, t in zip(prediction, label)) / len(prediction)


# Evaluate batch by batch; drop_remainder=True in the pipeline means
# image_N // batch_size full batches cover the (truncated) test set.
for step in range(HPARAMS['image_N'] // HPARAMS['batch_size']):
    x_in, y_in = sess.run([images_batch, labels_batch])
    pred, loss_ = sess.run([out, loss], feed_dict={inputx: x_in, inputy: y_in})

    # Per-batch progress line: batch accuracy, logits shape, scalar loss.
    print(calc(np.argmax(pred, 1).tolist(), y_in.tolist()), pred.shape, loss_.item())

    # Accumulate predictions/labels for the final overall accuracy.
    prediction += np.argmax(pred, 1).tolist()
    labels += y_in.tolist()

# Consistency fix: reuse calc() instead of re-implementing the accuracy
# computation inline (the original duplicated the sum/len logic here).
print("acc:  ", calc(prediction, labels))
print(len(prediction))
print("over")
