import tensorflow as tf
from TF_record import read_tf_record
import os
from model import FaceModel
import pickle
import numpy as np
from topN_v3 import MAP

# Pin training to the second GPU only.
os.environ["CUDA_VISIBLE_DEVICES"] = '1'

# Enable memory growth so TF allocates GPU memory on demand instead of
# grabbing the whole device up front.
for gpu in tf.config.experimental.list_physical_devices(device_type='GPU'):
    print('use gpu:', gpu)
    tf.config.experimental.set_memory_growth(gpu, True)

IMG_SIZE = 224

# Pixel normalization applied to every split (train/val): [0, 255] -> [0, 1].
resize_and_rescale = tf.keras.Sequential([
    tf.keras.layers.experimental.preprocessing.Rescaling(1. / 255)
])

# Random augmentation applied to the training split only (see prepare()).
data_augmentation = tf.keras.Sequential([
    tf.keras.layers.experimental.preprocessing.RandomFlip("horizontal_and_vertical",
                                                          input_shape=(IMG_SIZE, IMG_SIZE, 3)),
    tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),
])


def prepare(ds, batch_size, shuffle=False, augment=False):
    """Turn a raw (image, label) dataset into a batched, prefetched pipeline.

    Rescales pixels for every split; shuffling and augmentation are opt-in
    and intended for the training split only.
    """
    autotune = tf.data.experimental.AUTOTUNE

    # Normalize pixel values on every sample.
    ds = ds.map(lambda image, label: (resize_and_rescale(image), label),
                num_parallel_calls=autotune)

    if shuffle:
        ds = ds.shuffle(batch_size * 1000)

    ds = ds.batch(batch_size)

    # Augmentation runs after batching so whole batches are transformed at once.
    if augment:
        ds = ds.map(lambda image, label: (data_augmentation(image, training=True), label),
                    num_parallel_calls=autotune)

    # Overlap preprocessing with model execution.
    return ds.prefetch(buffer_size=autotune)


@tf.function
def train_step(model, inputs, optimizer_x):
    """Run one optimization step and return the total (prediction + reg) loss.

    The model is expected to return its prediction loss directly when called
    with ``training=True`` (loss is computed inside the model's call).
    """
    with tf.GradientTape() as tape:
        pred_loss = model(inputs, training=True)
        # Guard: add_n raises on an empty list when the model has no
        # regularization losses.
        if model.losses:
            regularization_loss = tf.math.add_n(model.losses)
        else:
            regularization_loss = tf.constant(0.0)
        c_loss = pred_loss + regularization_loss
    # Gradient computation and the optimizer update happen OUTSIDE the tape
    # context; the original ran them inside, which records the backward pass
    # on the tape and wastes memory/compute.
    gradients = tape.gradient(c_loss, model.trainable_variables)
    optimizer_x.apply_gradients(zip(gradients, model.trainable_variables))
    return c_loss


def test(model, testloader):
    """Extract features for every batch in `testloader` and score retrieval.

    Returns:
        (Top10_P, Top10_R): top-10 precision and recall as computed by MAP().
    """
    features = []
    img_index = []
    for idx, ds in enumerate(testloader):
        # Explicit keyword instead of a positional `False`, which relied on
        # `training` being the second positional argument of the call.
        f = model(ds, training=False).numpy()
        # f = np.int8(f > 0).astype(np.uint8)  # to binary.
        features.append(f)
        # ds is assumed to be an (image, label) batch; ds[1] holds the
        # image indices/labels — TODO confirm against read_tf_record.
        img_index.append(ds[1].numpy())
        if idx % 1000 == 0:
            print('{}'.format(idx))
    features = np.concatenate(features)  # (N, dim)
    img_index = np.concatenate(img_index).squeeze()

    Top10_P, Top10_R = MAP(features, img_index)
    print('\n')
    print("Top10_P={}".format(Top10_P))
    print("Top10_R={}".format(Top10_R))
    return Top10_P, Top10_R


if __name__ == '__main__':
    root = '/hdd9/ppp/image_retrival'

    # Class dictionary; its size determines the classifier head width.
    with open(root + '/cls.pk', 'rb') as fp:
        cls = pickle.load(fp)
    cls_num = len(cls.keys())
    print("cls_num=", cls_num)

    record_path = root + '/train.record'
    batch = 48
    train_ds = tf.data.TFRecordDataset(record_path).map(read_tf_record, num_parallel_calls=10)
    train_ds = prepare(train_ds, batch, shuffle=True, augment=True)

    record_path = root + '/test.record'
    val_ds = tf.data.TFRecordDataset(record_path).map(read_tf_record, num_parallel_calls=10)
    val_ds = prepare(val_ds, batch)

    model = FaceModel(dim=256, n_cls=cls_num, scale=30, margin=0.1, loss_type='cos')
    epochs = 10
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001, decay=1e-4, clipvalue=10.0)
    best_top10 = 0
    for epoch in range(epochs):
        total_loss = 0.0
        k = 0.0
        for ds in train_ds:
            total_loss += train_step(model, ds, optimizer)
            k += 1.0
            # k already counts completed batches, so divide by k (the
            # original divided by k + 1, underreporting the running average).
            if k % 1000 == 0:
                print("epoch:{},batch_num:{},loss:{}".format(epoch, k, total_loss / k))

        # Guard against an empty training dataset.
        print("epoch:{},loss:{}".format(epoch, total_loss / max(k, 1.0)))

        # Evaluate retrieval quality and checkpoint on improvement.
        Top10_P, Top10_R = test(model, val_ds)
        if best_top10 < Top10_P:
            best_top10 = Top10_P
            # Idiomatic instance-method call instead of the unbound
            # tf.keras.Model.save(model, ...).
            model.save(root + '/model')