import time
import tensorflow as tf
import numpy as np
import pickle
from model import VisionTransformer
# b'filenames', b'batch_label', b'fine_labels', b'coarse_labels', b'data'
# Path to the CIFAR-100 training split (python pickle format).
filename = "./dataset/train"

# Training hyperparameters, looked up by name throughout the script.
# Keys use consistent double-quoting (original mixed "..." and '...').
HPARAMS = {
    "batch_size": 320,
    "learning_rate": 0.01,
}


def read_data(filename, training):
    """Load a CIFAR-100 pickle batch file and return next-batch tensors.

    Args:
        filename: path to a CIFAR-100 python-pickle file (expects keys
            b'data' and b'fine_labels').
        training: if True, shuffle and repeat the dataset indefinitely;
            otherwise make a single pass.

    Returns:
        (image_batch, label_batch): tensors of shape
        [batch_size, 32, 32, 3] float32 in [0, 1] and [batch_size] labels,
        produced by a one-shot iterator over the dataset.
    """
    with open(filename, 'rb') as fo:
        # NOTE(review): pickle.load is unsafe on untrusted input; this
        # assumes the local dataset file is trusted.
        raw = pickle.load(fo, encoding='bytes')  # was `dict` — shadowed the builtin
    # -1 instead of a hard-coded 50000 so the same loader also works for
    # the 10000-image test split.
    images = raw[b'data'].reshape([-1, 3, 32, 32])
    images = np.transpose(images, [0, 2, 3, 1])  # NCHW -> NHWC
    images = images.astype(np.float32)
    labels = np.array(raw[b'fine_labels'])

    def _preprocess(image, label):
        # Scale pixel values from [0, 255] to [0, 1].
        image = image / 255.0
        return image, label

    ds = tf.data.Dataset.from_tensor_slices((images, labels))
    ds = ds.map(_preprocess)
    if training:
        ds = ds.shuffle(HPARAMS['batch_size'] * 10)
        ds = ds.repeat()
    else:
        ds = ds.repeat(1)
    ds = ds.batch(batch_size=HPARAMS['batch_size'], drop_remainder=True)
    iterator = tf.compat.v1.data.make_one_shot_iterator(ds)
    image_batch, label_batch = iterator.get_next()
    return image_batch, label_batch

# Build the ViT model for CIFAR-100 (32x32 RGB images, 100 fine labels).
model = VisionTransformer(
    image_size=32,
    patch_size=4,
    num_layers=4,
    num_classes=100,
    d_model=64,
    num_heads=4,
    mlp_dim=128,
    channels=3,
    dropout=0.1
)

# Model emits raw logits, so from_logits=True.
loss_fun = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

images_batch, labels_batch = read_data(filename=filename, training=True)

# NOTE(review): under TensorFlow 2.x these placeholders require graph mode
# (tf.compat.v1.disable_eager_execution()) — confirm which TF version this
# script targets before adding that call.
inputx = tf.compat.v1.placeholder(
    tf.float32, shape=[HPARAMS['batch_size'], 32, 32, 3], name="inputx")
inputy = tf.compat.v1.placeholder(
    tf.int64, shape=[HPARAMS['batch_size'], ], name="inputy")

out = model(inputx, training=True)
loss = loss_fun(inputy, out)
# Use tf.compat.v1 consistently: plain tf.train.AdamOptimizer / tf.Session /
# tf.GraphKeys etc. do not exist under TF2, while the rest of this
# graph-mode code already relies on tf.compat.v1.
optimizer = tf.compat.v1.train.AdamOptimizer(
    learning_rate=HPARAMS['learning_rate'], beta1=0.9)
# Gate the train step on collected update ops (e.g. moving-average updates);
# previously they were collected into `update_ops` but never executed.
update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = optimizer.minimize(loss)

config = tf.compat.v1.ConfigProto(allow_soft_placement=True)
sess = tf.compat.v1.Session(config=config)  # session
sess.run(tf.compat.v1.global_variables_initializer())
saver = tf.compat.v1.train.Saver(max_to_keep=10)

for step in range(100000000):
    s = time.time()
    # Pull a concrete batch from the dataset iterator, then feed it back
    # through the placeholders for the training step.
    x_in, y_in = sess.run([images_batch, labels_batch])
    out_, loss_, _ = sess.run([out, loss, train_op], feed_dict={
                              inputx: x_in, inputy: y_in})
    if step % 100 == 0:
        # TODO(review): consider logging to a file instead of stdout.
        print("step: {} , loss:  {:.4f} ,  time: {:.4f}".format(
            step, loss_.item(), time.time() - s))
    if step % 10000 == 0:
        # Checkpoint periodically (at most 10 checkpoints kept).
        # TODO(review): per the original notes, also evaluate test accuracy
        # after each save and separately keep the single best checkpoint.
        saver.save(sess, "./model/vit-{:.3f}".format(loss_.item()),
                   global_step=step)
