import time
import tensorflow as tf
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns

# Fix TF's RNG so residual-batch sampling and initialization are reproducible.
tf.random.set_seed(1121)
# Ground-truth trajectory (states X over times t) used for evaluation and for
# the normalization statistics computed below.
x_test = np.load('./Disappear/X_test_case3.npy')
t_test = np.load('./Disappear/t_test_case3.npy')
# Observed data for the inverse problem: states and their measurement times.
data_set = np.load('./Disappear/X_inverse_case3.npy')
# data_set = tf.cast(x_train_data, dtype=tf.float32)
t_train_data = np.load('./Disappear/t_inverse_case3.npy')
data_points = tf.cast(t_train_data, dtype=tf.float32)
x_test = tf.cast(x_test, dtype=tf.float32)
t_test = tf.cast(t_test, dtype=tf.float32)
# Per-column mean/std of the test trajectory; shared by Shrink/Release/Shrink2.
mu_X_set = tf.reduce_mean(x_test, axis=0)
sigma_X_set = tf.math.reduce_std(x_test, axis=0)


def batch(X_set, batch_size):
    """Return a random mini-batch of `batch_size` rows drawn from X_set
    without replacement."""
    total_rows = X_set.shape[0]
    shuffled = tf.random.shuffle(tf.range(total_rows))
    picked = shuffled[:batch_size]
    return tf.gather(X_set, picked)


def Shrink(X_set):
    """Standardize X_set column-wise (z-score) using the test-set mean/std."""
    return (X_set - mu_X_set) / sigma_X_set


def Release(X_set):
    """Invert Shrink: map standardized values back to the original scale."""
    return X_set * sigma_X_set + mu_X_set


def Shrink2(X_set):
    """Scale the 8-column residual matrix by the stds of the matching states.

    Divides each column of X_set by the test-set standard deviation of the
    corresponding state variable (columns 0, 2, 3, 4, 7, 16, 17, 19 of
    x_test), so the ODE residual is compared on the same normalized scale as
    the network's autodiff time derivatives.

    Args:
        X_set: (batch, 8) tensor of ODE right-hand-side values.

    Returns:
        (batch, 8) tensor of per-column scaled values.
    """
    # tf.gather replaces the original 8 scalar unpacks + tf.stack; it yields
    # the same shape-(8,) vector of stds in the same order.
    kk = tf.gather(sigma_X_set, [0, 2, 3, 4, 7, 16, 17, 19])
    return X_set / kk


def res_func(Grad_matrix, x):
    """MSE between the ODE right-hand sides and the network time derivatives.

    Evaluates the right-hand side of the 20-state ODE model at the
    de-normalized network predictions `x`, keeps the 8 equations matching the
    tracked state columns (0, 2, 3, 4, 7, 16, 17, 19), rescales them with
    Shrink2, and returns the MSE against `Grad_matrix`.

    Args:
        Grad_matrix: (batch, 8) autodiff d/dt of the tracked, std-normalized
            network outputs (assembled in train()).
        x: (batch, 20) predictions mapped back to physical scale via Release.

    Returns:
        Scalar residual loss.
    """
    # Q: interaction term driven by columns 16 and 17 acting with column 4.
    Q = 3e-7 * (x[:, 16] + x[:, 17]) * x[:, 4]
    # KD = 1e4
    # KD and KQ are trainable parameters of `net`, rescaled here to their
    # physical magnitude; all other rate constants below are fixed.
    KD = net.KD * 1e4
    KI12 = 0.3
    KI10 = 0.15
    KI4 = 1e-3
    KI6 = 0.1
    KI2 = 1e-2
    KC = 1e7
    KIga = 0.18
    KTal = 0.02
    KTbe = 5e-4

    # Saturating activation factors, each of the form x / (x + K), in [0, 1).
    g_D = x[:, 7] / (x[:, 7] + KD)
    g_I12 = x[:, 12] / (x[:, 12] + KI12)
    g_I10 = x[:, 11] / (x[:, 11] + KI10)
    g_I4 = x[:, 9] / (x[:, 9] + KI4)
    g_I6 = x[:, 10] / (x[:, 10] + KI6)
    g_I2 = x[:, 8] / (x[:, 8] + KI2)
    g_C = x[:, 4] / (KC + x[:, 4])
    g_Iga = x[:, 15] / (x[:, 15] + KIga)
    g_Tal = x[:, 14] / (x[:, 14] + KTal)
    g_Tbe = x[:, 13] / (x[:, 13] + KTbe)

    KTI10 = 2e-3
    KTI4 = 1
    # KQ is trainable (see class Inverse2), rescaled by 10.
    KQ = net.KQ * 10
    KTI12 = 1
    KTTbe = 1
    thB = 5e3
    thC = 5e7
    nC = 2

    # Inhibition factors of the form 1 / (1 + x / K): equal to 1 at x = 0 and
    # decaying as the inhibiting quantity grows.
    f_I10 = 1 / (1 + (x[:, 11] / KTI10))
    f_I4 = 1 / (1 + (x[:, 9] / KTI4))
    f_I12 = 1 / (1 + (x[:, 12] / KTI12))
    f_Q = 1 / (1 + (Q / KQ))
    f_Tbe = 1 / (1 + (x[:, 13] / KTTbe))
    f_B = 1 / (1 + (x[:, 5] / thB))
    f_C = 1 / (1 + (x[:, 4] / thC) ** nC)

    # laT1 / dT1 are trainable growth/decay rates for state 0.
    # dy1..dy20 below are the right-hand sides of the 20 ODEs; each is
    # expanded to shape (batch, 1) so the selected ones can be concatenated.
    laT1 = net.laT1
    dT1 = net.dT1
    la10 = 0.4
    la20 = 0.09
    la80 = 0.07
    dy1_dt = (la10 * x[:, 3] + laT1 * 2 * x[:, 0]) * f_Q - dT1 * x[:, 0]
    dy1_dt = tf.expand_dims(dy1_dt, axis=1)

    laT2 = 0.02
    dT2 = 0.1
    dy2_dt = la20 * 2 * x[:, 3] + laT2 * x[:, 1] - dT2 * x[:, 1]
    dy2_dt = tf.expand_dims(dy2_dt, axis=1)

    laT8 = 0.03
    dT8 = 0.1

    dy3_dt = (la80 * 2 * x[:, 3] + laT8 * 2 * x[:, 2]) * f_Q - dT8 * x[:, 2]
    dy3_dt = tf.expand_dims(dy3_dt, axis=1)

    lamda0 = 0.04
    Tn = 8e6
    # dy4: source from g_D minus the three differentiation outflows that feed
    # equations dy1/dy2/dy3 above.
    dy4_dt_1 = lamda0 * g_D * Tn
    dy4_dt_2 = -la10 * f_Q * x[:, 3]
    dy4_dt_3 = -la20 * 2 * x[:, 3]
    dy4_dt_4 = -la80 * 2 * f_Q * x[:, 3]
    dy4_dt = dy4_dt_1 + dy4_dt_2 + dy4_dt_3 + dy4_dt_4
    dy4_dt = tf.expand_dims(dy4_dt, axis=1)

    dC = 0.18
    k8 = 3.4e-7
    yial = 0.26
    yiga = 0.1
    laC0 = 0.5
    laC1 = 0.5
    dy5_dt = (laC0 + laC1) * f_C * x[:, 4] - (k8 * x[:, 2] + yial + yiga) * x[:, 4] - dC * x[:, 4]
    dy5_dt = tf.expand_dims(dy5_dt, axis=1)

    laB = 0.49
    dB = 0.08
    dy6_dt = laB * (g_I2 + g_I6) * f_B * x[:, 5] - dB * x[:, 5]
    dy6_dt = tf.expand_dims(dy6_dt, axis=1)

    laM = 0.022
    M0 = 1e6
    dM = 0.05
    dy7_dt = laM * g_C * M0 - dM * x[:, 6]
    dy7_dt = tf.expand_dims(dy7_dt, axis=1)

    laD = 0.17
    D0 = 2e4
    dD = 0.03
    dy8_dt = laD * (g_C + 1) * D0 - dD * x[:, 7]
    dy8_dt = tf.expand_dims(dy8_dt, axis=1)

    laI2T1 = 4e-8
    dI2 = 2
    dy9_dt = laI2T1 * x[:, 0] - dI2 * x[:, 8]
    dy9_dt = tf.expand_dims(dy9_dt, axis=1)

    laI4T2 = 4.5e-8
    dI4 = 15
    dy10_dt = laI4T2 * x[:, 1] - dI4 * x[:, 9]
    dy10_dt = tf.expand_dims(dy10_dt, axis=1)

    laI6M = 8e-8
    laI6B = 8e-8
    dI6 = 12
    dy11_dt = laI6M * x[:, 6] + laI6B * x[:, 5] - dI6 * x[:, 10]
    dy11_dt = tf.expand_dims(dy11_dt, axis=1)

    laI10T2 = 7.9e-9
    laI10C = 7.9e-9
    muI = 0.216
    dI10 = 10
    dy12_dt = laI10T2 * x[:, 1] + laI10C * x[:, 4] - muI * x[:, 11] * x[:, 18] - dI10 * x[:, 11]
    dy12_dt = tf.expand_dims(dy12_dt, axis=1)

    laI12M = 2.7e-6
    laI12D = 2.7e-6
    dI12 = 2
    dy13_dt = laI12M * x[:, 6] + laI12D * x[:, 7] - dI12 * x[:, 12]
    dy13_dt = tf.expand_dims(dy13_dt, axis=1)

    laTbeC = 6e-11
    dTbe = 2
    dy14_dt = laTbeC * x[:, 4] - dTbe * x[:, 13]
    dy14_dt = tf.expand_dims(dy14_dt, axis=1)

    laTal1 = 2.1e-7
    laTal8 = 2.1e-7
    dTal = 15
    dy15_dt = laTal1 * x[:, 0] + laTal8 * x[:, 2] - dTal * x[:, 14]
    dy15_dt = tf.expand_dims(dy15_dt, axis=1)

    laIgaT1 = 2.5e-7
    laIgaT8 = 2.5e-8
    dIga = 18
    dy16_dt = laIgaT1 * x[:, 0] + laIgaT8 * x[:, 2] - dIga * x[:, 15]
    dy16_dt = tf.expand_dims(dy16_dt, axis=1)

    muA = 0.03
    # NOTE(review): dy17/dy18 divide by x[:, 0] / x[:, 2]; this produces
    # inf/nan if those predicted populations reach zero — confirm that is
    # impossible, or guard with a small epsilon.
    dy17_dt_1 = (x[:, 16] / x[:, 0]) * (la10 * x[:, 3] + laT1 * 2 * x[:, 0]) * f_Q
    dy17_dt_2 = -x[:, 16] * dT1 - muA * x[:, 16] * x[:, 19]
    dy17_dt = dy17_dt_1 + dy17_dt_2
    dy17_dt = tf.expand_dims(dy17_dt, axis=1)

    dy18_dt_1 = (x[:, 17] / x[:, 2]) * (
            la80 * 2 * x[:, 3] + laT8 * 2 * x[:, 2]) * f_Q
    dy18_dt_2 = -x[:, 17] * dT8 - muA * x[:, 17] * x[:, 19]
    dy18_dt = dy18_dt_1 + dy18_dt_2
    dy18_dt = tf.expand_dims(dy18_dt, axis=1)

    gaI = 15
    # NOTE: muI is re-assigned here with the same value as above — redundant
    # but harmless.
    muI = 0.216
    dI = 0.069
    # NOTE(review): the decay term uses x[:, 19] rather than x[:, 18]; verify
    # against the model (dy19_dt is not included in pre_grad below, so this
    # does not affect the current residual loss either way).
    dy19_dt = gaI - muI * x[:, 11] * x[:, 18] - dI * x[:, 19]
    dy19_dt = tf.expand_dims(dy19_dt, axis=1)

    gaA = 0.01
    dA = 0.05
    dy20_dt = gaA - muA * (x[:, 16] + x[:, 17]) * x[:, 19] - dA * x[:, 19]
    dy20_dt = tf.expand_dims(dy20_dt, axis=1)

    # Keep only the 8 equations that correspond to the tracked state columns
    # (0, 2, 3, 4, 7, 16, 17, 19) — the same columns whose time derivatives
    # are stacked into Grad_matrix in train(). The full 20-equation version
    # is kept below for reference.
    pre_grad = tf.concat([dy1_dt, dy3_dt, dy4_dt, dy5_dt, dy8_dt, dy17_dt, dy18_dt, dy20_dt], axis=1)
    # pre_grad = tf.concat([dy1_dt, dy2_dt, dy3_dt, dy4_dt, dy5_dt, dy6_dt, dy7_dt, dy8_dt, dy9_dt, dy10_dt, dy11_dt,
    # dy12_dt, dy13_dt, dy14_dt, dy15_dt, dy16_dt, dy17_dt, dy18_dt, dy19_dt, dy20_dt], axis=1)
    # Rescale the physical-scale RHS to the normalized scale of Grad_matrix.
    pre_grad = Shrink2(pre_grad)
    loss_res = mse(pre_grad, Grad_matrix)

    return loss_res


def train(iteration, res_points, t_ini, ini_sets, val_points, val_data, batch_size, res_weight, data_weight, mode):
    """Optimize the PINN for `iteration` steps against data + physics losses.

    Args:
        iteration: number of gradient steps to run.
        res_points: (N, 1) candidate time points for the ODE residual.
        t_ini: (1, 1) initial time (t = 0).
        ini_sets: normalized initial state, shape (1, 20).
        val_points: time points of the observed data.
        val_data: normalized observed states at `val_points`.
        batch_size: residual points sampled per step.
        res_weight: multiplier on the residual loss.
        data_weight: multiplier on the data loss.
        mode: 0 -> fit only columns 0 (T1) and 4 (C); 1 -> fit all 20 columns.

    Raises:
        ValueError: if `mode` is neither 0 nor 1.

    Side effects: updates the module-level `net` in place via `optimizer`,
    and appends to the module-level loss/parameter logs every 100 epochs.
    """
    start_time = time.time()
    for epoch in range(iteration):
        with tf.GradientTape() as tape:
            or_data_pred = net(val_points)
            if mode == 0:
                T1_pred = or_data_pred[:, 0]
                C_pred = or_data_pred[:, 4]
                loss_data1 = mse(val_data[:, 0], T1_pred)
                loss_data2 = mse(val_data[:, 4], C_pred)
                loss_data = loss_data1 + loss_data2
            elif mode == 1:
                loss_data = mse(val_data, or_data_pred)
            else:
                # Fail fast instead of leaving loss_data unbound below.
                raise ValueError('mode must be 0 or 1, got %r' % (mode,))

            # Initial-condition loss at t = 0.
            or_ini_pred = net(t_ini)
            loss_ini = mse(ini_sets, or_ini_pred)

            # Sample a fresh batch of residual points and differentiate the
            # network outputs w.r.t. time with an inner persistent tape.
            t_res = tf.Variable(batch(res_points, batch_size))
            with tf.GradientTape(persistent=True) as t_g:
                or_res_pred = net(t_res)
                X_hat = or_res_pred
                a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19 = tf.split(
                    X_hat, 20, axis=1)

            # Time derivatives of the 8 tracked state columns
            # (0, 2, 3, 4, 7, 16, 17, 19), matching res_func's pre_grad order.
            grad0 = t_g.gradient(a0, t_res)
            grad2 = t_g.gradient(a2, t_res)
            grad3 = t_g.gradient(a3, t_res)
            grad4 = t_g.gradient(a4, t_res)
            grad7 = t_g.gradient(a7, t_res)
            grad16 = t_g.gradient(a16, t_res)
            grad17 = t_g.gradient(a17, t_res)
            grad19 = t_g.gradient(a19, t_res)
            # Persistent tapes hold resources until garbage-collected; release
            # explicitly once all gradients are taken.
            del t_g
            grad_X = tf.stack(
                [grad0[:, 0], grad2[:, 0], grad3[:, 0], grad4[:, 0], grad7[:, 0], grad16[:, 0], grad17[:, 0],
                 grad19[:, 0]], axis=1)
            X_hat_release = Release(X_hat)
            loss_res = res_func(grad_X, X_hat_release)
            loss_res = tf.cast(loss_res, tf.float32)
            # Fix: data_weight was previously accepted but never applied.
            # (Both existing call sites pass data_weight=1, so their results
            # are unchanged.)
            loss_total = res_weight * loss_res + loss_ini + data_weight * loss_data

        gradients = tape.gradient(loss_total, net.trainable_variables)
        optimizer.apply_gradients(zip(gradients, net.trainable_variables))

        if epoch % 100 == 0:
            elapsed = time.time() - start_time
            KD = net.KD.numpy()
            KQ = net.KQ.numpy()
            dT1 = net.dT1.numpy()
            laT1 = net.laT1.numpy()
            print('It: %d, Loss: %.3e, Loss_res: %.3e, Loss_ini: %.3e, Loss_data: %.3e,time: %.2f' % (
                epoch, loss_total, loss_res, loss_ini, loss_data, elapsed))
            loss_log.append(loss_total)
            loss_res_log.append(loss_res)
            loss_data_log.append(loss_data)
            loss_ini_log.append(loss_ini)
            param_log.append([KQ, KD, laT1, dT1])
            start_time = time.time()


class ClipConstraint(tf.keras.constraints.Constraint):
    """Keras weight constraint that clamps values into [min_value, max_value]."""

    def __init__(self, min_value, max_value):
        # Bounds applied element-wise after each optimizer update.
        self.min_value = min_value
        self.max_value = max_value

    def __call__(self, w):
        clipped = tf.clip_by_value(w, self.min_value, self.max_value)
        return clipped


class Inverse2(tf.keras.Model):
    """Fully connected network mapping time t -> 20 state variables, carrying
    four trainable, box-constrained physical parameters (KD, KQ, laT1, dT1)
    for the inverse problem."""

    def __init__(self, NN, act, num_hidden_layers):
        super(Inverse2, self).__init__()
        self.act = act
        # Trainable physical parameters; ClipConstraint keeps each inside its
        # admissible range after every optimizer step.
        self.KD = tf.Variable(1.5, constraint=ClipConstraint(min_value=0.2, max_value=1.8), trainable=True)
        self.KQ = tf.Variable(1.5, constraint=ClipConstraint(min_value=0.252, max_value=2.268), trainable=True)
        self.laT1 = tf.Variable(0.03, constraint=ClipConstraint(min_value=6e-3, max_value=3.6e-2), trainable=True)
        self.dT1 = tf.Variable(0.3, constraint=ClipConstraint(min_value=4e-2, max_value=0.36), trainable=True)
        # 1 -> NN -> (NN x num_hidden_layers) -> 20 architecture.
        self.input_layer = tf.keras.layers.Dense(NN, input_dim=1)
        self.hidden_layers = []
        for _ in range(num_hidden_layers):
            self.hidden_layers.append(
                tf.keras.layers.Dense(NN, kernel_initializer="glorot_normal"))
        self.output_layer = tf.keras.layers.Dense(20)

    def call(self, x):
        h = self.input_layer(x)
        h = self.act(h)
        for dense in self.hidden_layers:
            h = self.act(dense(h))
        # Linear output head: no activation on the final layer.
        return self.output_layer(h)

if __name__ == '__main__':
    # tanh MLP: 1 input (time) -> 7 hidden layers of width 128 -> 20 outputs.
    function = tf.tanh
    net = Inverse2(128, function, 7)
    mse = tf.keras.losses.MeanSquaredError()
    starter_learning_rate = 1e-3
    # Smooth exponential LR decay: factor 0.9 every 500 steps.
    learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(starter_learning_rate, 500, 0.9,
                                                                   staircase=False)
    optimizer = tf.keras.optimizers.Adam(learning_rate)
    net.compile(optimizer, loss=mse)
    batch_sizes = 256

    # Loss/parameter histories appended to inside train() every 100 epochs.
    loss_res_log = []
    loss_ini_log = []
    loss_data_log = []
    loss_log = []
    param_log = []

    # Initial condition: first row of the test trajectory at t = 0,
    # normalized the same way as the network targets.
    t_ini = tf.zeros((1, 1))
    y0 = x_test[0, :]
    y0 = tf.expand_dims(y0, axis=0)

    ini_set = Shrink(y0)

    # Residual time grid: denser over [0, 25], coarser over [25, 200].
    # NOTE(review): tf.linspace with integer endpoints yields a float64
    # tensor here while the data tensors are float32 — confirm the implicit
    # casting is acceptable, or cast point_set to float32 explicitly.
    stage1 = tf.linspace(0, 25, 250)
    stage2 = tf.linspace(25, 200, 750)
    point_set = tf.concat([stage1, stage2], axis=0)
    res_set = tf.expand_dims(point_set, 1)

    data_sets = Shrink(data_set)

    # data_sets = data_sets + 0.2 * np.std(data_sets) * np.random.randn(data_sets.shape[0], data_sets.shape[1])

    # Short warm-up fitting all 20 states, then longer training fitting only
    # T1 (col 0) and C (col 4).
    train(100, res_set, t_ini, ini_set, data_points, data_sets, batch_sizes, data_weight=1, res_weight=10, mode=1)
    train(400, res_set, t_ini, ini_set, data_points, data_sets, batch_sizes, data_weight=1, res_weight=10, mode=0)
    Tumor_pred = net(t_test)
    # Final estimates of [KQ, KD, laT1, dT1] from the last log entry.
    param_list = np.array(param_log)
    param1 = param_list[-1]