from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from matplotlib import pyplot as plt
from sklearn.metrics import r2_score
from tensorflow.keras import losses
from tensorflow.python.keras.optimizer_v2.adam import Adam
import tensorflow as tf

import test
from Discriminator import Discriminator
from Generate import Generate
from ZhengqiLoader import ZhengqiLoader
from cae import CAE
import numpy as np
from tensorflow.python.keras.models import Model
from NN_cae import NN_cae, loader_cae_nn

import generate_dense
from generate_CBAM import generate_CBAM
from plt import plt_loss, GAN_loss

# Default mini-batch size ("BACH" is a typo for "BATCH" kept for compatibility).
# NOTE(review): shadowed by the local `BACH_SIZE = 10` inside the __main__ block.
BACH_SIZE = 24




def get_random_block_from_data(data, batch_size):
    """Return a random contiguous block of ``batch_size`` rows from ``data``.

    The start index is drawn uniformly from every valid position. The ``+ 1``
    fixes an off-by-one: ``np.random.randint`` excludes its upper bound, so the
    original could never pick the final block and raised ``ValueError`` when
    ``len(data) == batch_size``.

    Note: this is sampling *with* replacement across calls — consecutive calls
    may return overlapping blocks.

    @param data: indexable sequence (list, ndarray, ...) of samples
    @param batch_size: number of consecutive samples to return; must be
        <= len(data)
    @return: the slice data[start:start + batch_size]
    """
    start_index = np.random.randint(0, len(data) - batch_size + 1)
    return data[start_index:(start_index + batch_size)]


def cae_middle_out(layer_name, X):
    """Return the activations of an intermediate CAE layer for input ``X``.

    Fix: ``keras.Model`` must be given both ``inputs`` and ``outputs``; the
    original passed only the intermediate tensor, which raises at model
    construction time.

    NOTE(review): ``cae`` is a subclassed model built via ``cae.build(...)``
    in __main__; ``cae.input`` is only defined after the model has been
    called/built on concrete data — confirm against the CAE implementation.

    @param layer_name: name of the layer whose output is wanted
    @param X: input batch fed through the truncated model
    @return: (middle_layer_output, middle_layer_output.shape)
    """
    middle_layer_out_model = Model(inputs=cae.input,
                                   outputs=cae.get_layer(layer_name).output)
    middle_layer_out_model.summary()
    middle_layer_output = middle_layer_out_model.predict(X)
    return middle_layer_output, middle_layer_output.shape


@tf.function
def train_step_cae(x):
    """Run one optimisation step of the convolutional auto-encoder.

    Forward pass, MSE reconstruction loss against the input itself, then one
    update of the module-level `cae` weights via the module-level `optimizers`
    (Adam) instance.

    @param x: input batch
    @return: scalar reconstruction loss for this step
    """
    with tf.GradientTape() as tape:
        reconstruction = cae(x, training=True)
        loss_mse = tf.reduce_mean(losses.MSE(reconstruction, x))
    gradients = tape.gradient(loss_mse, cae.trainable_variables)
    optimizers.apply_gradients(zip(gradients, cae.trainable_variables))
    return loss_mse


@tf.function
def train_step_NN(x, y):
    """Run one optimisation step of the regression head `NN`.

    Computes MSE between the network output and the targets, then applies one
    RMSprop update through the module-level `optimizers_cae_nn` instance.

    @param x: input batch (CAE middle-layer features)
    @param y: regression targets
    @return: scalar MSE loss for this step
    """
    with tf.GradientTape() as tape:
        prediction = NN(x)
        loss_mse = tf.reduce_mean(losses.MSE(prediction, y))
    gradients = tape.gradient(loss_mse, NN.trainable_variables)
    optimizers_cae_nn.apply_gradients(zip(gradients, NN.trainable_variables))
    return loss_mse


def gradient_penalty(Discriminator, batch_x, fake_number):
    """WGAN-GP gradient penalty term.

    Interpolates between real and generated samples, evaluates the critic's
    gradient at the interpolated points, and penalises the deviation of each
    per-sample gradient norm from 1.

    @param Discriminator: the critic network (note: shadows the imported
        Discriminator class inside this function)
    @param batch_x: real samples
    @param fake_number: generated samples, same shape as batch_x
    @return: scalar penalty mean((||grad|| - 1)^2)
    """
    batchsz = batch_x.shape[0]
    # One interpolation coefficient per sample, broadcast to the sample shape.
    t = tf.broadcast_to(tf.random.uniform([batchsz, 1]), batch_x.shape)
    # Linear interpolation between real and fake samples.
    interplate = t * batch_x + (1 - t) * fake_number
    # Record the critic's forward pass so we can differentiate w.r.t. the
    # interpolated inputs.
    with tf.GradientTape() as tape:
        tape.watch([interplate])
        d_interplote_logits = Discriminator(interplate)
    # Per-sample gradient, flattened to [b, -1] so each row gets one norm.
    grads = tape.gradient(d_interplote_logits, interplate)
    grads = tf.reshape(grads, [grads.shape[0], -1])
    per_sample_norm = tf.norm(grads, axis=1)
    return tf.reduce_mean((per_sample_norm - 1.) ** 2)


def d_loss_fn(generator, discriminator, batch_z, batch_x, is_training):
    """Critic (discriminator) loss for WGAN-GP.

    @param generator: generator network
    @param discriminator: critic network
    @param batch_z: latent input fed to the generator
    @param batch_x: real samples (main variables merged with label, [None, 6])
    @param is_training: training flag forwarded to both networks
    @return: (loss, gp) where loss = E[D(fake)] - E[D(real)] + 10 * gp
    """
    # Score generated and real samples with the critic.
    fake_number = generator(batch_z, is_training)
    d_fake_logits = discriminator(fake_number, is_training)
    d_real_logits = discriminator(batch_x, is_training)
    # Gradient-penalty regulariser, weighted by the standard factor 10.
    gp = gradient_penalty(discriminator, batch_x, fake_number)
    wasserstein = tf.reduce_mean(d_fake_logits) - tf.reduce_mean(d_real_logits)
    loss = wasserstein + 10 * gp
    return loss, gp


def g_loss_fn(generator, discriminator, batch_z, is_training):
    """Generator loss for WGAN: maximise the critic score of generated samples.

    @param generator: generator network
    @param discriminator: critic network
    @param batch_z: latent input fed to the generator
    @param is_training: training flag forwarded to both networks
    @return: scalar loss -E[D(G(z))]
    """
    d_fake_logits = discriminator(generator(batch_z, is_training), is_training)
    return -tf.reduce_mean(d_fake_logits)

def set_GPU():
    """Enable on-demand memory growth on every visible GPU and report counts."""
    # Uncomment to log which device each op is placed on:
    # tf.debugging.set_log_device_placement(True)
    physical = tf.config.experimental.list_physical_devices('GPU')
    print('物理GPU个数为：', len(physical))
    # Memory growth must be set before any GPU memory is allocated.
    for device in physical:
        tf.config.experimental.set_memory_growth(device, True)
    print('-------------已设置完GPU内存自增长--------------')
    logical = tf.config.experimental.list_logical_devices('GPU')
    print('逻辑GPU个数为：', len(logical))



if __name__ == '__main__':
    # Stage 0: GPU set-up (on-demand memory allocation).
    "按需分配"
    set_GPU()
    # Stage 1: load data and train the convolutional auto-encoder (CAE).
    "模型训练"
    URL = './zhengqi_train.txt'
    # NOTE(review): shadows the module-level BACH_SIZE = 24.
    BACH_SIZE = 10
    loader = ZhengqiLoader(URL)
    x_train_data, train_dataset, y_train, x_test_data, y_test = loader.preprocess(BACH_SIZE)
    # Build the CAE on 5x5 single-channel input blocks.
    cae = CAE()
    cae.build(input_shape=(None, 5, 5, 1))
    cae.summary()
    # NOTE(review): original comment said lr 0.0001 but the code uses 0.001 — confirm intended value.
    optimizers =Adam(learning_rate=0.001)  # cae的 lr 为0.0001
    loss_list = []
    cae_epochs = 10
    # NOTE(review): GAN_EPOCHS is never used; the GAN loop below runs range(600).
    GAN_EPOCHS = 300  # GAN training epochs
    # Train the convolutional auto-encoder.
    "训练卷积自编码器"
    for epoch in range(cae_epochs):
        print("\nStart of epoch %d" % (epoch,))
        for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
            loss_value = train_step_cae(x_batch_train)
            # Log and record the loss every 10 steps.
            if step % 10 == 0:
                print(step, float(loss_value))
                loss_list.append(loss_value)
    plt_loss(loss_list)
    # Stage 2: extract the CAE middle-layer features.
    # NOTE(review): uses the CAE's own middle_out method, not the module-level
    # cae_middle_out() helper defined above (which is unused here).
    "卷积自编码器中间层"
    middle_out = cae.middle_out(x_train_data)

    # Stage 3: train a regression head (NN) on the CAE features.
    "cae输出"
    NN = NN_cae()
    NN.build(input_shape=(middle_out.shape))
    optimizers_cae_nn = tf.keras.optimizers.RMSprop(0.0001)  # cae-nn lr = 0.0001
    NN.summary()
    print(y_train)
    cae_nn_dataset = loader_cae_nn(middle_out, y_train, BACH_SIZE)
    # print(cae_nn_dataset.shape)
    caenn_epochs = 12
    loss_caenn_list = []
    for epoch in range(caenn_epochs):
        print("\nStart of epoch %d" % (epoch,))
        for step, (x_batch_train, y_batch_train) in enumerate(cae_nn_dataset):
            loss_caenn = train_step_NN(x_batch_train, y_batch_train)
            if step % 10 == 0:
                print(step, float(loss_caenn))
                loss_caenn_list.append(float(loss_caenn))
    # print(float(loss_caenn_list))
    plt_loss(loss_caenn_list)
    # Stage 4: build the GAN generator (dual-attention: CBAM + attention).
    "构建双注意力模型GAN网络的生成器  CBAM+attention"
    genertor = Generate(0.5, input_shape=(None, 5, 5, 30))
    genertor.build(input_shape=(None, 5, 5, 30))
    genertor.summary()
    # Alternative generator: plain fully-connected.
    "构建全连接生成器"
    # genertor,b = generate_dense.generate_dense()
    # Alternative generator: CBAM attention module only.
    "构建CBAM注意力模块"
    # genertor,b=generate_CBAM()
    # print(b)
    # NOTE(review): GAN_epoch and batch_sz are never used below.
    GAN_epoch=10000
    batch_sz = 8
    is_training = True
    # NOTE(review): real_number is loaded again a few lines below — the second
    # call makes this one redundant.
    real_number = loader.discriminator_data_loader()
    # "构建简单神经网络做鉴别器"
    # Build a simple fully-connected network as the critic (input: [None, 6]).
    "构建简单神经网络做鉴别器"
    discriminator = Discriminator()
    discriminator.build(input_shape=(None, 6))
    discriminator.summary()
    # Separate optimisers for generator and critic (critic lr is 100x larger — verify intended).
    "分别为生成器和判别器创建优化器"
    g_optimizer = tf.keras.optimizers.RMSprop(0.0001)
    d_optimizer = tf.keras.optimizers.RMSprop(0.01)
    D_LOSS = []
    G_LOSS = []
    # Stage 5: data loading for the GAN.
    "数据加载"
    real_number = loader.discriminator_data_loader()
    train_db  = tf.data.Dataset.from_tensor_slices(real_number).shuffle(100).batch(128)
    # NOTE(review): train_middle is built but never consumed below.
    train_middle = tf.data.Dataset.from_tensor_slices(middle_out).shuffle(100).batch(128)
    # Stage 6: WGAN-GP adversarial training (600 epochs, 5 critic steps per generator step).
    "训练"
    for epoch in range(600):
        for step,batch_x in enumerate(train_db ):
            # Train the critic 5 times per generator update (WGAN convention).
            for _ in range(5):
                with tf.GradientTape() as tape:
                    batch_z = tf.random.normal([batch_x.shape[0],5,5,30])  # [64,5,5,30]
                    # batch_z = (batch_xz+batch_z1)/2
                    d_loss, gp = d_loss_fn(genertor, discriminator, batch_z, batch_x, is_training=True)
                grads = tape.gradient(d_loss, discriminator.trainable_variables)
                d_optimizer.apply_gradients(zip(grads, discriminator.trainable_variables))

            # One generator update.
            with tf.GradientTape() as tape:
                batch_z = tf.random.normal([batch_x.shape[0],5,5,30])  # [64,5,5,30]
                g_loss = g_loss_fn(genertor, discriminator, batch_z, is_training=True)
            grads = tape.gradient(g_loss, genertor.trainable_variables)
            g_optimizer.apply_gradients(zip(grads, genertor.trainable_variables))
    #     D_LOSS.append(d_loss)
    #     G_LOSS.append(g_loss)
    #     print(epoch, "d-loss:", float(d_loss), "g_loss：", float(g_loss))
    # GAN_loss(D_LOSS, G_LOSS)
        # Record the last step's losses every 10 epochs.
        if epoch != 0 and epoch % 10 == 0:
            D_LOSS.append(d_loss)
            G_LOSS.append(g_loss)
            print(epoch, "d-loss:", float(d_loss), "g_loss：", float(g_loss))
    GAN_loss(D_LOSS, G_LOSS)

    # Stage 7: evaluation — average the CAE-NN prediction with the
    # generator's first output channel, then plot and score.
    "测试部分"
    noise =  tf.random.normal([x_test_data.shape[0],5,5,30])
    print(x_test_data)
    # gan_input_test =x_test_data+noise
    # NOTE(review): `feature` is computed but never used afterwards.
    feature=cae.predict(x_test_data)
    middle_out_test = cae.middle_out(x_test_data)
    predict = NN.predict(middle_out_test)
    genertor_predict=genertor.predict(noise)  #gan_input_test
    # Final prediction: mean of the NN head and the generator's column 0.
    predict_final=(predict+tf.reshape(genertor_predict[:,0],[x_test_data.shape[0],1]))/2
    print(predict_final)
    plt.figure(figsize=(20, 10))
    plt.plot(predict_final, color='blue', marker='o', linestyle="--", label="double_AGAN")
    plt.plot(y_test, color='red', marker='+', linestyle="-", label="y_test")
    plt.title("CAEs")
    plt.xlabel("Sample number")
    plt.ylabel("Output value")
    plt.legend(loc="upper right")
    plt.show()
    # NOTE(review): 578 hard-codes the test-set size; reshape will fail if the
    # split changes — prefer [-1, 1].
    r2=r2_score(tf.reshape(y_test,[578,1]), predict_final)
    mse=mean_squared_error(tf.reshape(y_test,[578,1]), predict_final)
    print("+++++++++++++R2：",r2,"++++++++++")
    print("+++++++++++++MSE：", mse, "++++++++++")










