import tensorflow as tf
from tensorflow.keras import backend as K
from humanparsing.TF_record import read_tf_record
import os
from model.u2net import U2net
import numpy as np

# Pin this process to GPU 1; must be set before TensorFlow initializes devices.
os.environ["CUDA_VISIBLE_DEVICES"] = '1'

"""
train u2net model with humanparsing datasets
"""


def iou(y_pred, y_true):
    """Smoothed soft intersection-over-union between prediction and target.

    Both tensors are expected to hold values in [0, 1]. The smoothing term
    keeps the ratio finite (and equal to 1) when the union is empty.
    """
    eps = 0.001
    overlap = K.sum(y_pred * y_true)
    union = K.sum(y_pred + y_true) - overlap
    return (overlap + eps) / (union + eps)


def jac_distance(y_pred, y_true):
    """Jaccard distance: ``1 - iou`` over the flattened tensors."""
    return 1.0 - iou(K.flatten(y_pred), K.flatten(y_true))


def dice_coef(y_pred, y_true):
    """Smoothed soft Dice coefficient over the flattened tensors."""
    eps = 0.001
    pred_flat = K.flatten(y_pred)
    true_flat = K.flatten(y_true)
    overlap = K.sum(pred_flat * true_flat)
    totals = K.sum(pred_flat) + K.sum(true_flat)
    return (2 * overlap + eps) / (totals + eps)


def dice_coef_loss(y_true, y_pred):
    """Dice loss: ``1 - dice_coef`` (dice is symmetric in its arguments)."""
    loss = 1.0 - dice_coef(y_true, y_pred)
    return loss


# Square network input resolution in pixels.
IMG_SIZE = 512

# Scale raw pixel values into [0, 1]. Applied to both images and masks —
# assumes masks are stored as 0/255 so they become 0/1; verify against the
# TFRecord writer (read_tf_record).
resize_and_rescale = tf.keras.Sequential([
    # tf.keras.layers.experimental.preprocessing.Resizing(IMG_SIZE, IMG_SIZE),
    tf.keras.layers.experimental.preprocessing.Rescaling(1. / 255)
])

# Paired augmentation pipelines: one for 3-channel images, one for 1-channel
# masks. NOTE(review): both RandomFlip layers use seed=42 so image and mask
# receive identical flips — this relies on the two layers being invoked the
# same number of times in lockstep (as done in prepare()).
data_augmentation_x = tf.keras.Sequential([
    tf.keras.layers.experimental.preprocessing.RandomFlip("horizontal", seed=42,
                                                          input_shape=(IMG_SIZE, IMG_SIZE, 3)),
])

data_augmentation_y = tf.keras.Sequential([
    tf.keras.layers.experimental.preprocessing.RandomFlip("horizontal", seed=42,
                                                          input_shape=(IMG_SIZE, IMG_SIZE, 1)),
])


def prepare(ds, batch_size, shuffle=False, augment=False):
    """Build an input pipeline: rescale, optionally shuffle, batch,
    optionally augment, then prefetch.

    Args:
        ds: dataset yielding ``(image, mask)`` pairs.
        batch_size: number of pairs per batch.
        shuffle: shuffle with a buffer of ``batch_size * 100`` when True.
        augment: apply the paired random-flip augmentation when True
            (training split only).
    """
    autotune = tf.data.experimental.AUTOTUNE

    # Normalize pixel values for both image and mask.
    ds = ds.map(
        lambda img, mask: (resize_and_rescale(img), resize_and_rescale(mask)),
        num_parallel_calls=autotune,
    )

    if shuffle:
        ds = ds.shuffle(batch_size * 100)

    ds = ds.batch(batch_size)

    if augment:
        # The two pipelines share a seed, so image and mask flip together.
        ds = ds.map(
            lambda img, mask: (
                data_augmentation_x(img, training=True),
                data_augmentation_y(mask, training=True),
            ),
            num_parallel_calls=autotune,
        )

    # Overlap preprocessing with training.
    return ds.prefetch(buffer_size=autotune)


@tf.function
def train_step(model, inputs, optimizer_x):
    """Run one optimization step using the deep-supervision dice loss.

    Args:
        model: U2net instance; its call returns six side outputs plus a
            fused output (s1..s6, s_fuse).
        inputs: tuple ``(X, y)`` — image batch and mask batch.
        optimizer_x: optimizer applied to ``model.trainable_variables``.

    Returns:
        Scalar total dice loss summed over all seven outputs.
    """
    X, y = inputs
    with tf.GradientTape() as tape:
        # U^2-Net deep supervision: one dice loss per side output plus the
        # fused output, summed with equal weight.
        s1, s2, s3, s4, s5, s6, s_fuse = model(X, training=True)
        pred_loss = dice_coef_loss(y, s1)
        pred_loss += dice_coef_loss(y, s2)
        pred_loss += dice_coef_loss(y, s3)
        pred_loss += dice_coef_loss(y, s4)
        pred_loss += dice_coef_loss(y, s5)
        pred_loss += dice_coef_loss(y, s6)
        pred_loss += dice_coef_loss(y, s_fuse)
        # regularization_loss = tf.math.add_n(model.losses)
        c_loss = pred_loss
    # Compute and apply gradients OUTSIDE the tape context: the original
    # did this inside `with`, which makes the tape record the gradient and
    # update ops themselves — wasted work and memory.
    gradients = tape.gradient(c_loss, model.trainable_variables)
    optimizer_x.apply_gradients(zip(gradients, model.trainable_variables))
    return c_loss


if __name__ == '__main__':

    root = '/record'
    model_root = '/model'
    batch = 4

    # Training split: shuffled and augmented.
    record_path = os.path.join(root, 'train.record')
    train_ds = tf.data.TFRecordDataset(record_path).map(read_tf_record, num_parallel_calls=10)
    train_ds = prepare(train_ds, batch, shuffle=True, augment=True)

    # Validation split: rescaled and batched only.
    record_path = os.path.join(root, 'test.record')
    val_ds = tf.data.TFRecordDataset(record_path).map(read_tf_record, num_parallel_calls=10)
    val_ds = prepare(val_ds, batch)

    # lossObj = tf.keras.losses.BinaryCrossentropy()
    # lossObj = FocalLoss(alpha=0.75)
    model = U2net()
    epochs = 100
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
    best = 0.0  # best validation dice seen so far
    for epoch in range(epochs):
        # ---- training ----
        total_loss = 0.0
        k = 0.0
        for ds in train_ds:
            total_loss += train_step(model, ds, optimizer)
            k += 1.0
            # Log the running average every 1000 batches. The original
            # divided by k + 1 although only k batches had been summed.
            if k % 1000 == 0:
                print("epoch:{},batch_num:{},loss:{}".format(epoch, k, total_loss / k))

        # Guard against an empty dataset to avoid division by zero.
        print("epoch:{},loss:{}".format(epoch, total_loss / max(k, 1.0)))

        # ---- validation ----
        dice = []
        IOU = []
        for X, y in val_ds:
            # The model returns six side outputs plus the fused map (see
            # train_step); the fused output — the last one — is the final
            # prediction. The original passed the whole 7-tuple to the
            # metrics, which scored all side outputs jumbled together.
            pre = model(X, training=False)[-1]
            dice.append(dice_coef(pre, y).numpy())
            IOU.append(iou(pre, y).numpy())
        mean_dice = float(np.mean(dice)) if dice else 0.0
        mean_iou = float(np.mean(IOU)) if IOU else 0.0
        print("dice", mean_dice)
        print("IOU", mean_iou)

        # Checkpoint whenever validation dice improves.
        if best < mean_dice:
            best = mean_dice
            print('save model')
            tf.keras.models.save_model(model, model_root + '/u2net')