import time
import tensorflow as tf
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns

tf.random.set_seed(1121)


class ForwardModel:
    """Physics-informed neural network (PINN) for a 20-state tumour-immune ODE system.

    Two identical fully-connected sub-networks each map a scalar time ``t`` to
    10 state variables; their outputs are concatenated into the full 20-state
    prediction. Training minimises the sum of an initial-condition loss and an
    ODE-residual loss evaluated at randomly batched collocation time points.
    """

    def __init__(self, x_train_data, t_train_data, x_test, t_test, layers):
        """Store data, compute normalisation statistics and build both networks.

        Args:
            x_train_data: training state trajectories (rows = time points,
                columns = 20 states -- assumed; confirm against data files).
            t_train_data: time stamps matching ``x_train_data`` rows.
            x_test: reference trajectory used both for normalisation statistics
                and for the initial condition (first row).
            t_test: time stamps matching ``x_test`` rows.
            layers: list of layer widths, e.g. [1, 128, ..., 10]; first entry is
                the input size, last entry the per-subnetwork output size.
        """
        # Load data as float32 tensors.
        self.x_test = tf.cast(x_test, dtype=tf.float32)
        self.t_test = tf.cast(t_test, dtype=tf.float32)
        self.data_set = tf.cast(x_train_data, dtype=tf.float32)
        self.data_points = tf.cast(t_train_data, dtype=tf.float32)

        # Per-state normalisation statistics, taken from the test trajectory.
        self.mu_X_set = tf.reduce_mean(self.x_test, axis=0)
        self.sigma_X_set = tf.math.reduce_std(self.x_test, axis=0)
        # Normalised training states (kept for completeness; note the separate
        # raw copy in self.data_set above).
        self.data_sets = (x_train_data - self.mu_X_set) / self.sigma_X_set
        # Normalised initial condition: state at the first test time point.
        self.ini_set = (x_test[0, :] - self.mu_X_set) / self.sigma_X_set

        self.layers = layers
        # Two sub-networks; each predicts half of the 20 state variables.
        self.model1 = self.build_model(layers)
        self.model2 = self.build_model(layers)

        self.optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
        self.loss_log = []
        self.loss_res_log = []
        self.loss_ini_log = []

    def build_model(self, layers):
        """Build an MLP with tanh hidden layers and a linear output layer."""
        inputs = tf.keras.Input(shape=(layers[0],))
        x = inputs
        for layer_size in layers[1:-1]:
            x = tf.keras.layers.Dense(layer_size, activation='tanh')(x)
        outputs = tf.keras.layers.Dense(layers[-1])(x)
        return tf.keras.Model(inputs, outputs)

    def res_func(self, Grad_matrix, x):
        """ODE-residual loss: compare autodiff time-gradients with the model ODEs.

        Args:
            Grad_matrix: d(normalised state)/dt from the tape, shape (batch, 20).
            x: de-normalised (physical-scale) state predictions, shape (batch, 20).

        Returns:
            Scalar MSE between predicted and analytic gradients over a random
            subset of 8 of the 20 state equations (resampled every step).

        NOTE(review): column indices 0..19 are assumed to map to the states
        T1, T2, T8, ..., A in the underlying model -- confirm against the paper
        / data generation script.
        """
        Q = 3e-7 * (x[:, 16] + x[:, 17]) * x[:, 4]
        # Michaelis-Menten half-saturation constants.
        KI12 = 0.3
        KI10 = 0.15
        KI4 = 1e-3
        KI6 = 0.1
        KI2 = 1e-2
        KIga = 0.18
        KTal = 0.02
        KTbe = 5e-4

        '''Group I'''
        k8 = 3.4e-7
        yial = 0.26
        lamda0 = 0.04
        dT8 = 0.1
        la10 = 0.4
        la80 = 0.07
        gaA = 0.01
        la20 = 0.09
        muA = 0.03
        laT8 = 0.03

        '''Group II'''
        KQ = 12.6
        KD = 1e4
        laT1 = 0.03
        dT1 = 0.2

        '''Group III'''
        Tn = 8e6
        KC = 1e7
        laD = 0.17
        yiga = 0.1

        '''Group IV'''
        D0 = 2e4
        dD = 0.03
        dC = 0.18
        thC = 5e7
        dA = 0.05

        # Saturating activation terms g_*(x) = x / (x + K).
        g_D = x[:, 7] / (x[:, 7] + KD)
        g_I12 = x[:, 12] / (x[:, 12] + KI12)
        g_I10 = x[:, 11] / (x[:, 11] + KI10)
        g_I4 = x[:, 9] / (x[:, 9] + KI4)
        g_I6 = x[:, 10] / (x[:, 10] + KI6)
        g_I2 = x[:, 8] / (x[:, 8] + KI2)
        g_C = x[:, 4] / (KC + x[:, 4])
        g_Iga = x[:, 15] / (x[:, 15] + KIga)
        g_Tal = x[:, 14] / (x[:, 14] + KTal)
        g_Tbe = x[:, 13] / (x[:, 13] + KTbe)

        KTI10 = 2e-3
        KTI4 = 1
        KTI12 = 1
        KTTbe = 1
        thB = 5e3
        nC = 2

        # Inhibition terms f_*(x) = 1 / (1 + x / K).
        f_I10 = 1 / (1 + (x[:, 11] / KTI10))
        f_I4 = 1 / (1 + (x[:, 9] / KTI4))
        f_I12 = 1 / (1 + (x[:, 12] / KTI12))
        f_Q = 1 / (1 + (Q / KQ))
        f_Tbe = 1 / (1 + (x[:, 13] / KTTbe))
        f_B = 1 / (1 + (x[:, 5] / thB))
        f_C = 1 / (1 + (x[:, 4] / thC) ** nC)

        # Right-hand sides of the 20 ODEs; each is expanded to (batch, 1) so
        # they can be concatenated into a (batch, 20) gradient matrix.
        dy1_dt = (la10 * x[:, 3] + laT1 * 2 * x[:, 0]) * f_Q - dT1 * x[:, 0]
        dy1_dt = tf.expand_dims(dy1_dt, axis=1)

        laT2 = 0.02
        dT2 = 0.1
        dy2_dt = la20 * 2 * x[:, 3] + laT2 * x[:, 1] - dT2 * x[:, 1]
        dy2_dt = tf.expand_dims(dy2_dt, axis=1)

        dy3_dt = (la80 * 2 * x[:, 3] + laT8 * 2 * x[:, 2]) * f_Q - dT8 * x[:, 2]
        dy3_dt = tf.expand_dims(dy3_dt, axis=1)

        dy4_dt_1 = lamda0 * g_D * Tn
        dy4_dt_2 = -la10 * f_Q * x[:, 3]
        dy4_dt_3 = -la20 * 2 * x[:, 3]
        dy4_dt_4 = -la80 * 2 * f_Q * x[:, 3]
        dy4_dt = dy4_dt_1 + dy4_dt_2 + dy4_dt_3 + dy4_dt_4
        dy4_dt = tf.expand_dims(dy4_dt, axis=1)

        laC0 = 0.5
        laC1 = 0.5
        dy5_dt = (laC0 + laC1) * f_C * x[:, 4] - (k8 * x[:, 2] + yial + yiga) * x[:, 4] - dC * x[:, 4]
        dy5_dt = tf.expand_dims(dy5_dt, axis=1)

        laB = 0.49
        dB = 0.08
        dy6_dt = laB * (g_I2 + g_I6) * f_B * x[:, 5] - dB * x[:, 5]
        dy6_dt = tf.expand_dims(dy6_dt, axis=1)

        laM = 0.022
        M0 = 1e6
        dM = 0.05
        dy7_dt = laM * g_C * M0 - dM * x[:, 6]
        dy7_dt = tf.expand_dims(dy7_dt, axis=1)

        dy8_dt = laD * (g_C + 1) * D0 - dD * x[:, 7]
        dy8_dt = tf.expand_dims(dy8_dt, axis=1)

        laI2T1 = 4e-8
        dI2 = 2
        dy9_dt = laI2T1 * x[:, 0] - dI2 * x[:, 8]
        dy9_dt = tf.expand_dims(dy9_dt, axis=1)

        laI4T2 = 4.5e-8
        dI4 = 15
        dy10_dt = laI4T2 * x[:, 1] - dI4 * x[:, 9]
        dy10_dt = tf.expand_dims(dy10_dt, axis=1)

        laI6M = 8e-8
        laI6B = 8e-8
        dI6 = 12
        dy11_dt = laI6M * x[:, 6] + laI6B * x[:, 5] - dI6 * x[:, 10]
        dy11_dt = tf.expand_dims(dy11_dt, axis=1)

        laI10T2 = 7.9e-9
        laI10C = 7.9e-9
        muI = 0.216
        dI10 = 10
        dy12_dt = laI10T2 * x[:, 1] + laI10C * x[:, 4] - muI * x[:, 11] * x[:, 18] - dI10 * x[:, 11]
        dy12_dt = tf.expand_dims(dy12_dt, axis=1)

        laI12M = 2.7e-6
        laI12D = 2.7e-6
        dI12 = 2
        dy13_dt = laI12M * x[:, 6] + laI12D * x[:, 7] - dI12 * x[:, 12]
        dy13_dt = tf.expand_dims(dy13_dt, axis=1)

        laTbeC = 6e-11
        dTbe = 2
        dy14_dt = laTbeC * x[:, 4] - dTbe * x[:, 13]
        dy14_dt = tf.expand_dims(dy14_dt, axis=1)

        laTal1 = 2.1e-7
        laTal8 = 2.1e-7
        dTal = 15
        dy15_dt = laTal1 * x[:, 0] + laTal8 * x[:, 2] - dTal * x[:, 14]
        dy15_dt = tf.expand_dims(dy15_dt, axis=1)

        laIgaT1 = 2.5e-7
        laIgaT8 = 2.5e-8
        dIga = 18
        dy16_dt = laIgaT1 * x[:, 0] + laIgaT8 * x[:, 2] - dIga * x[:, 15]
        dy16_dt = tf.expand_dims(dy16_dt, axis=1)

        # NOTE(review): x[:, 16] / x[:, 0] (and x[:, 17] / x[:, 2] below) can
        # divide by zero when the corresponding state vanishes -- assumed
        # strictly positive along trajectories; confirm against the data.
        dy17_dt_1 = (x[:, 16] / x[:, 0]) * (la10 * x[:, 3] + laT1 * 2 * x[:, 0]) * f_Q
        dy17_dt_2 = -x[:, 16] * dT1 - muA * x[:, 16] * x[:, 19]
        dy17_dt = dy17_dt_1 + dy17_dt_2
        dy17_dt = tf.expand_dims(dy17_dt, axis=1)

        dy18_dt_1 = (x[:, 17] / x[:, 2]) * (
                la80 * 2 * x[:, 3] + laT8 * 2 * x[:, 2]) * f_Q
        dy18_dt_2 = -x[:, 17] * dT8 - muA * x[:, 17] * x[:, 19]
        dy18_dt = dy18_dt_1 + dy18_dt_2
        dy18_dt = tf.expand_dims(dy18_dt, axis=1)

        gaI = 15
        dI = 0.069
        # muI (0.216) is already defined above for dy12_dt; the original code
        # redefined it here with the identical value -- duplicate removed.
        dy19_dt = gaI - muI * x[:, 11] * x[:, 18] - dI * x[:, 19]
        dy19_dt = tf.expand_dims(dy19_dt, axis=1)

        dy20_dt = gaA - muA * (x[:, 16] + x[:, 17]) * x[:, 19] - dA * x[:, 19]
        dy20_dt = tf.expand_dims(dy20_dt, axis=1)

        pre_grad = tf.concat([dy1_dt, dy2_dt, dy3_dt, dy4_dt, dy5_dt, dy6_dt, dy7_dt, dy8_dt, dy9_dt, dy10_dt, dy11_dt,
                              dy12_dt, dy13_dt, dy14_dt, dy15_dt, dy16_dt, dy17_dt, dy18_dt, dy19_dt, dy20_dt], axis=1)
        # Scale analytic gradients to the normalised space (divide by sigma),
        # matching Grad_matrix which is d(normalised state)/dt.
        pre_grad = self.Shrink3(pre_grad)

        # FIX: the original used np.random.shuffle here. Inside a
        # @tf.function-traced call, NumPy side effects run only ONCE at trace
        # time, so the "random" 8-equation subset was frozen for the whole
        # training run. tf.random.shuffle is a graph op and resamples the
        # subset at every training step, as intended.
        selected_indices_tensor = tf.random.shuffle(
            tf.range(pre_grad.shape[1], dtype=tf.int32))[:8]

        pre_grad = tf.gather(pre_grad, selected_indices_tensor, axis=1)
        Grad_matrix = tf.gather(Grad_matrix, selected_indices_tensor, axis=1)

        loss_res = tf.keras.losses.MeanSquaredError()(pre_grad, Grad_matrix)
        return loss_res

    def Shrink(self, X_set):
        """Normalise: subtract mean and divide by std (per state column)."""
        fix_x = (X_set - self.mu_X_set) / self.sigma_X_set
        return fix_x

    def Shrink3(self, X_set):
        """Scale by 1/std only (for gradients, whose mean offset cancels)."""
        fix_x = (X_set) / self.sigma_X_set
        return fix_x

    def Release(self, X_set):
        """Inverse of Shrink: map normalised states back to physical scale."""
        fix_x = X_set * self.sigma_X_set + self.mu_X_set
        return fix_x

    def Shrink2(self, X_set):
        """Scale an 8-column subset by the std of states 1,3,4,5,8,17,18,20.

        NOTE(review): currently unused in this file; the fixed column choice
        matches the commented-out 8-equation variant of the residual.
        """
        a1 = self.sigma_X_set[0]
        a3 = self.sigma_X_set[2]
        a4 = self.sigma_X_set[3]
        a5 = self.sigma_X_set[4]
        a8 = self.sigma_X_set[7]
        a17 = self.sigma_X_set[16]
        a18 = self.sigma_X_set[17]
        a20 = self.sigma_X_set[19]
        kk = tf.stack([a1, a3, a4, a5, a8, a17, a18, a20], axis=0)
        fix_x = X_set / kk
        return fix_x

    def batch(self, X_set, batch_size):
        """Return a random batch of rows (without replacement within a batch)."""
        N = X_set.shape[0]
        indices = tf.random.shuffle(tf.range(N))[:batch_size]
        batch_x = tf.gather(X_set, indices)
        return batch_x

    @tf.function
    def train_step(self, t_res, t_ini, ini_sets, res_weight):
        """One optimisation step over both sub-networks.

        Args:
            t_res: collocation times, a tf.Variable of shape (batch, 1) so the
                tape tracks gradients w.r.t. time.
            t_ini: initial time point(s), shape (1, 1).
            ini_sets: normalised initial state, matched against the prediction
                at t_ini.
            res_weight: accepted for interface compatibility but currently NOT
                applied -- the total loss is an unweighted sum. NOTE(review):
                confirm whether weighting loss_res was intended.

        Returns:
            (loss_total, loss_res, loss_ini) tensors.
        """
        with tf.GradientTape() as tape:
            # Initial-condition loss.
            ini_pred1 = self.model1(t_ini)
            ini_pred2 = self.model2(t_ini)
            ini_pred = tf.concat([ini_pred1, ini_pred2], axis=1)
            loss_ini = tf.keras.losses.MeanSquaredError()(ini_sets, ini_pred)

            # Inner tape: gradients of each of the 20 outputs w.r.t. time.
            with tf.GradientTape(persistent=True) as t_g:
                or_res_pred1 = self.model1(t_res)
                or_res_pred2 = self.model2(t_res)
                or_res_pred = tf.concat([or_res_pred1, or_res_pred2], axis=1)
                X_hat = or_res_pred
                a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19 = tf.split(
                    X_hat, 20, axis=1)

            grad0 = t_g.gradient(a0, t_res)
            grad1 = t_g.gradient(a1, t_res)
            grad2 = t_g.gradient(a2, t_res)
            grad3 = t_g.gradient(a3, t_res)
            grad4 = t_g.gradient(a4, t_res)
            grad5 = t_g.gradient(a5, t_res)
            grad6 = t_g.gradient(a6, t_res)
            grad7 = t_g.gradient(a7, t_res)
            grad8 = t_g.gradient(a8, t_res)
            grad9 = t_g.gradient(a9, t_res)
            grad10 = t_g.gradient(a10, t_res)
            grad11 = t_g.gradient(a11, t_res)
            grad12 = t_g.gradient(a12, t_res)
            grad13 = t_g.gradient(a13, t_res)
            grad14 = t_g.gradient(a14, t_res)
            grad15 = t_g.gradient(a15, t_res)
            grad16 = t_g.gradient(a16, t_res)
            grad17 = t_g.gradient(a17, t_res)
            grad18 = t_g.gradient(a18, t_res)
            grad19 = t_g.gradient(a19, t_res)
            grad_X = tf.concat(
                [grad0, grad1, grad2, grad3, grad4, grad5, grad6, grad7, grad8, grad9, grad10, grad11, grad12,
                 grad13, grad14, grad15, grad16, grad17, grad18, grad19], axis=1)
            # The ODE right-hand sides are defined on physical-scale states.
            X_hat_release = self.Release(X_hat)
            loss_res = self.res_func(grad_X, X_hat_release)
            loss_res = tf.cast(loss_res, tf.float32)

            loss_total = loss_res + loss_ini

        gradients = tape.gradient(loss_total, self.model1.trainable_variables + self.model2.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.model1.trainable_variables + self.model2.trainable_variables))

        return loss_total, loss_res, loss_ini

    def train(self, iteration, res_points, t_ini, ini_sets, batch_size, res_weight):
        """Run the training loop, logging losses every 100 epochs.

        NOTE(review): a fresh tf.Variable is created every epoch and passed to
        the @tf.function train_step, which can trigger repeated retracing --
        a known performance pitfall, left unchanged to preserve behaviour.
        """
        start_time = time.time()
        for epoch in range(iteration):
            t_res = tf.Variable(self.batch(res_points, batch_size))
            loss_total, loss_res, loss_ini = self.train_step(t_res, t_ini, ini_sets, res_weight)

            if epoch % 100 == 0:
                elapsed = time.time() - start_time
                print('It: %d, Loss: %.3e, Loss_res: %.3e, Loss_ini: %.3e, Time: %.2f' % (
                    epoch, loss_total.numpy(), loss_res.numpy(), loss_ini.numpy(), elapsed))
                self.loss_log.append(loss_total.numpy())
                self.loss_res_log.append(loss_res.numpy())
                self.loss_ini_log.append(loss_ini.numpy())
                start_time = time.time()

    def predict(self, t_test):
        """Predict the de-normalised 20-state trajectory at the given times."""
        Tumor_pred1 = self.model1(t_test)
        Tumor_pred2 = self.model2(t_test)
        Tumor_pred = tf.concat([Tumor_pred1, Tumor_pred2], axis=1)
        # Use the class's own de-normalisation helper instead of repeating
        # the sigma/mu arithmetic inline (same computation, one source of truth).
        Tumor_pred = self.Release(Tumor_pred)
        return Tumor_pred.numpy()


# class ActiveLearningModel(ForwardModel):
#     def __init__(self, x_train_data, t_train_data, x_test, t_test, layers, unlabeled_data, unlabeled_labels):
#         super().__init__(x_train_data, t_train_data, x_test, t_test, layers)
#         self.unlabeled_data = tf.cast(unlabeled_data, dtype=tf.float32)
#         self.unlabeled_labels = tf.cast(unlabeled_labels, dtype=tf.float32)
#
#     def uncertainty_sampling(self, num_samples=10):
#         # 预测未标注数据的概率分布
#         predictions = self.model(self.unlabeled_data)
#         # 计算预测的概率分布的熵
#         entropy = tf.keras.losses.categorical_crossentropy(predictions, predictions, from_logits=True)
#         # 选择熵最大的样本
#         top_indices = tf.argsort(entropy)[-num_samples:]
#         return top_indices
#
#     def active_learning(self, num_iterations=10, num_samples=10, batch_size=256, res_weight=10):
#         for iteration in range(num_iterations):
#             print(f"Iteration {iteration + 1}/{num_iterations}")
#             # 训练模型
#             self.train(5000, res_set, t_ini, model.ini_set, batch_size, res_weight)
#
#             # 选择最有价值的样本
#             selected_indices = self.uncertainty_sampling(num_samples)
#             selected_data = tf.gather(self.unlabeled_data, selected_indices)
#             selected_labels = tf.gather(self.unlabeled_labels, selected_indices)
#
#             # 将新标注的数据加入训练集
#             self.data_set = tf.concat([self.data_set, selected_data], axis=0)
#             self.data_points = tf.concat([self.data_points, selected_labels], axis=0)
#
#             # 从未标注数据集中移除已选择的样本
#             mask = tf.ones_like(self.unlabeled_data, dtype=tf.bool)
#             mask = tf.tensor_scatter_nd_update(mask, tf.expand_dims(selected_indices, axis=1),
#                                                tf.zeros(num_samples, dtype=tf.bool))
#             self.unlabeled_data = tf.boolean_mask(self.unlabeled_data, mask)
#             self.unlabeled_labels = tf.boolean_mask(self.unlabeled_labels, mask)
#
#             # 重新训练模型
#             self.train(5000, res_set, t_ini, model.ini_set, batch_size, res_weight)


if __name__ == '__main__':
    # Load precomputed trajectories: 20-state solutions and their time grids.
    x_test = np.load('../Disappear/X_test_case3.npy')
    t_test = np.load('../Disappear/t_test_case3.npy')
    x_train_data = np.load('../Disappear/X_inverse_case3.npy')
    t_train_data = np.load('../Disappear/t_inverse_case3.npy')

    # The network is anchored to the initial condition at t = 0.
    # (The original also built an unused copy of x_test[0, :]; removed.)
    t_ini = tf.zeros((1, 1))

    # Each of the two sub-networks maps scalar time -> 10 states; together
    # they cover the full 20-state vector.
    layers = [1, 128, 128, 128, 128, 128, 128, 128, 10]
    model = ForwardModel(x_train_data, t_train_data, x_test, t_test, layers)

    batch_sizes = 256
    # Collocation grid: dense over [0, 25] where dynamics change fastest,
    # sparser over [25, 200]. Float literals keep tf.linspace in float32,
    # matching the float32 model (integer endpoints promote the dtype).
    stage1 = tf.linspace(0.0, 25.0, 250)
    stage2 = tf.linspace(25.0, 200.0, 750)
    point_set = tf.concat([stage1, stage2], axis=0)
    res_set = tf.expand_dims(point_set, 1)

    model.train(5000, res_set, t_ini, model.ini_set, batch_sizes, 10)
    Tumor_pred = model.predict(model.t_test)
    np.save('Tumor_forward.npy', Tumor_pred)
