import os
import numpy as np
import tensorflow as tf
from keras.utils.np_utils import to_categorical
from tensorflow.keras import layers, Model, Sequential
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras.losses import binary_crossentropy, categorical_crossentropy
from tensorflow.keras.metrics import binary_accuracy
from tensorflow.keras import backend as K
from tqdm import tqdm
import matplotlib.pyplot as plt
import time
from functools import partial
import signal
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler


class WGAN_GP():
    """Wasserstein GAN with gradient penalty (WGAN-GP, Gulrajani et al. 2017)
    for generating EEG-like samples of shape (1, 22, 1125).

    A fully-connected generator maps a latent noise vector to a signal and a
    fully-connected critic ("discriminator") scores real vs. generated
    signals.  Training minimises the Wasserstein loss plus a gradient penalty
    that drives the critic's gradient norm towards 1 on samples interpolated
    between real and generated data.
    """

    def __init__(self, gen_optimizer, disc_optimizer, input_dim, noise_dim=256, dropout=0.2,
                 checkpoint_dir='./checkpoints'):
        """
        Args:
            gen_optimizer: Keras optimizer used for the generator.
            disc_optimizer: Keras optimizer used for the critic.
            input_dim: flattened sample size (22 * 1125 for BCI-IV-2a).
            noise_dim: latent dimension fed to the generator.
            dropout: kept for backward compatibility; the current networks
                do not use it.
            checkpoint_dir: directory where periodic snapshots are written.
        """
        self.noise_dim = noise_dim
        self.dropout = dropout
        self.input_dim = input_dim
        self.checkpoint_dir = checkpoint_dir

        self.generator_optimizer = gen_optimizer
        self.discriminator_optimizer = disc_optimizer

        self.history = {}

        # Build the networks *before* loading any checkpoint so saved weights
        # can actually be restored into them.  The original code loaded the
        # checkpoint first, which resumed the epoch counter while training
        # freshly initialised (untrained) models.
        self.generator = self.build_generator()
        self.discriminator = self.build_discriminator()
        self.load_checkpoint()

        # Epoch currently being trained; updated by train() and used by the
        # SIGINT handler so an interrupt saves real progress.
        self.current_epoch = self.start_epoch

        # Signal handling for graceful shutdown
        signal.signal(signal.SIGINT, self.signal_handler)

    def build_generator(self):
        """Build the generator: Dense stack 128-256-512-1024 -> input_dim,
        reshaped to (1, 22, 1125) and gated by a small convolutional
        attention mask (sigmoid output multiplied onto the signal)."""
        model = Sequential()
        model.add(layers.Dense(128, use_bias=False, input_shape=(self.noise_dim,)))
        model.add(layers.BatchNormalization())
        model.add(layers.ReLU())

        # NOTE: the duplicate input_shape on this layer was removed; Keras
        # ignores it on non-first layers, so behaviour is unchanged.
        model.add(layers.Dense(256, use_bias=False))
        model.add(layers.BatchNormalization())
        model.add(layers.ReLU())

        model.add(layers.Dense(512, use_bias=False))
        model.add(layers.BatchNormalization())
        model.add(layers.ReLU())

        model.add(layers.Dense(1024, use_bias=False))  # extra capacity layer
        model.add(layers.BatchNormalization())
        model.add(layers.ReLU())

        model.add(layers.Dense(self.input_dim, use_bias=False))
        model.add(layers.Reshape((1, 22, 1125)))

        # Attention mechanism: per-position mask in [0, 1]
        attention = layers.Conv2D(64, kernel_size=(1, 3), padding='same', activation='relu')(model.output)
        attention = layers.Conv2D(1, kernel_size=(1, 1), activation='sigmoid')(attention)

        # Multiply attention with the output
        output = layers.Multiply()([model.output, attention])

        return Model(inputs=model.input, outputs=output)

    def build_discriminator(self):
        """Build the critic: flatten -> Dense 1024-512-256-128 -> linear score.

        No final activation: WGAN critics output unbounded scores, not
        probabilities.
        """
        model = Sequential()
        model.add(layers.Flatten(input_shape=(1, 22, 1125)))
        model.add(layers.Dense(1024, use_bias=False))
        model.add(layers.LeakyReLU(alpha=0.2))

        model.add(layers.Dense(512, use_bias=False))
        model.add(layers.LeakyReLU(alpha=0.2))

        model.add(layers.Dense(256, use_bias=False))  # extra layer
        model.add(layers.LeakyReLU(alpha=0.2))

        model.add(layers.Dense(128, use_bias=False))  # extra layer
        model.add(layers.LeakyReLU(alpha=0.2))

        model.add(layers.Dense(1, use_bias=False))

        return Model(inputs=model.input, outputs=model.output)

    def generate_fake_data(self, N=256):
        """Sample N latent vectors and return (generated_signals, noise)
        as numpy arrays; the generator runs in inference mode."""
        noise = tf.random.normal([N, self.noise_dim]).numpy()
        return self.generator(noise, training=False).numpy(), noise

    def disc_loss(self, fake_logits, real_logits):
        """Wasserstein critic loss: mean fake score minus mean real score.

        The 1e-8 constant is kept from the original implementation; it does
        not affect gradients.
        """
        return tf.reduce_mean(fake_logits) - tf.reduce_mean(real_logits) + 1e-8

    def gen_loss(self, fake_logits):
        """Generator loss: negated mean critic score on generated samples."""
        return -tf.reduce_mean(fake_logits) + 1e-8

    def gradient_penalty(self, discriminator, real_data, gen_signal):
        """Standalone gradient-penalty helper (train_step computes its own
        penalty inline; this is kept for external use / debugging).

        Fixes vs. the original version:
          * interpolation now moves from real towards generated data
            (real + eps * (fake - real)) instead of away from it;
          * the full gradient tensor is used (the previous `[0]` kept only
            the first sample's gradient);
          * the L2 norm is taken per sample (axes 1..3), not over the batch.
        """
        eps = tf.random.uniform([real_data.shape[0], 1, 1, 1], 0., 1.)
        inter = real_data + eps * (gen_signal - real_data)
        with tf.GradientTape() as tape:
            tape.watch(inter)
            pred = discriminator(inter)

        grad = tape.gradient(pred, inter)
        grad_l2_norm = tf.sqrt(tf.reduce_sum(tf.square(grad), axis=[1, 2, 3]))

        return tf.reduce_mean((grad_l2_norm - 1.0) ** 2)

    @tf.function
    def train_step(self, images):
        """Run one optimisation step for both networks on a single batch.

        Returns (disc_loss, disc_grads, gen_loss, gen_grads).  The gradient
        penalty now uses a *per-sample* interpolation coefficient, as in the
        WGAN-GP paper (the original drew one scalar for the whole batch), and
        tf.shape() is used so the traced graph handles a smaller final batch.
        """
        batch_size = tf.shape(images)[0]
        noise = tf.random.normal([batch_size, self.noise_dim])

        with tf.GradientTape() as disc_tape, tf.GradientTape() as gen_tape:
            generated_images = self.generator(noise, training=True)

            real_output = self.discriminator(images, training=True)
            fake_output = self.discriminator(generated_images, training=True)

            # Wasserstein critic loss.
            disc_loss = tf.reduce_mean(fake_output) - tf.reduce_mean(real_output)

            # Gradient penalty on per-sample interpolations between real
            # and generated data.
            epsilon = tf.random.uniform([batch_size, 1, 1, 1], 0.0, 1.0)
            interpolated = epsilon * images + (1 - epsilon) * generated_images
            with tf.GradientTape() as gp_tape:
                gp_tape.watch(interpolated)
                interpolated_output = self.discriminator(interpolated, training=True)
            gradients = gp_tape.gradient(interpolated_output, [interpolated])[0]
            slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1, 2, 3]))
            gradient_penalty = tf.reduce_mean((slopes - 1.0) ** 2)

            disc_loss += self.gp_weight * gradient_penalty

            gen_loss = -tf.reduce_mean(fake_output)

        disc_grads = disc_tape.gradient(disc_loss, self.discriminator.trainable_variables)
        gen_grads = gen_tape.gradient(gen_loss, self.generator.trainable_variables)

        # Gradient clipping keeps updates bounded and training stable.
        disc_grads = [tf.clip_by_value(g, -1.0, 1.0) for g in disc_grads]
        gen_grads = [tf.clip_by_value(g, -1.0, 1.0) for g in gen_grads]

        self.discriminator_optimizer.apply_gradients(zip(disc_grads, self.discriminator.trainable_variables))
        self.generator_optimizer.apply_gradients(zip(gen_grads, self.generator.trainable_variables))

        return disc_loss, disc_grads, gen_loss, gen_grads

    def train(self, train_dataset, epochs=300, batch_size=64, discriminator_iters=5, gp_weight=10, plot=True,
              save_plots=True):
        """Train the GAN on train_dataset (numpy array of real samples).

        NOTE(review): discriminator_iters currently only rescales the logged
        critic loss/grad values; the critic is optimised once per batch, not
        `discriminator_iters` times — confirm whether multiple critic steps
        were intended.  gp_weight is the gradient-penalty coefficient.
        Returns the history dict with per-epoch losses and gradient norms.
        """
        dataset = tf.data.Dataset.from_tensor_slices(train_dataset.astype('float32')).shuffle(
            train_dataset.shape[0]).batch(batch_size)
        N_batch = np.ceil(train_dataset.shape[0] / float(batch_size))
        self.discriminator_iters = discriminator_iters
        self.gp_weight = gp_weight

        history = self.history
        gen_loss_history, disc_loss_history = [], []
        gen_grads_history, disc_grads_history = [], []

        for epoch in range(self.start_epoch, epochs):
            self.current_epoch = epoch  # keeps the SIGINT handler accurate
            start = time.time()
            gen_loss, disc_loss, disc_grads, gen_grads = 0, 0, 0, 0

            with tqdm(total=N_batch, position=0, leave=True) as pbar:
                for image_batch in dataset:
                    disc_loss_batch, disc_grads_batch, gen_loss_batch, gen_grads_batch = self.train_step(image_batch)

                    # Flatten all per-variable gradients into one vector so a
                    # single L2 norm summarises the update magnitude.
                    disc_grads_batch_flat = tf.concat([tf.reshape(grad, [-1]) for grad in disc_grads_batch], axis=0)
                    gen_grads_batch_flat = tf.concat([tf.reshape(grad, [-1]) for grad in gen_grads_batch], axis=0)

                    disc_loss_batch = tf.reduce_mean(disc_loss_batch).numpy() / float(self.discriminator_iters)
                    disc_grads_batch = tf.reduce_mean(
                        tf.sqrt(tf.reduce_sum(tf.square(disc_grads_batch_flat)))).numpy() / float(
                        self.discriminator_iters)
                    gen_loss_batch = tf.reduce_mean(gen_loss_batch).numpy()
                    gen_grads_batch = tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square(gen_grads_batch_flat)))).numpy()

                    gen_loss += gen_loss_batch / float(N_batch)
                    disc_loss += disc_loss_batch / float(N_batch)
                    gen_grads += gen_grads_batch / float(N_batch)
                    disc_grads += disc_grads_batch / float(N_batch)

                    # Divergence guard: on NaN, halve both learning rates and
                    # restart the epoch (values are numpy scalars here, so
                    # np.isnan replaces the original tf.math.is_nan).
                    if np.isnan(disc_loss_batch) or np.isnan(gen_loss_batch):
                        print(f"NaN detected at epoch {epoch + 1}. Adjusting learning rate and retrying.")
                        self.discriminator_optimizer.learning_rate.assign(
                            self.discriminator_optimizer.learning_rate * 0.5)
                        self.generator_optimizer.learning_rate.assign(self.generator_optimizer.learning_rate * 0.5)
                        break

                    pbar.update()

                # Skip history bookkeeping for an epoch aborted by NaN.
                if np.isnan(disc_loss_batch) or np.isnan(gen_loss_batch):
                    continue

            gen_loss_history.append(gen_loss)
            disc_loss_history.append(disc_loss)
            gen_grads_history.append(gen_grads)
            disc_grads_history.append(disc_grads)

            print('Epoch #: {}/{}, Time taken: {} secs,\n Grads: disc= {}, gen= {},\n Losses: disc= {}, gen= {}'.format(
                epoch + 1, epochs, time.time() - start, disc_grads, gen_grads, disc_loss, gen_loss))

            if (epoch + 1) % 20 == 0 and plot:
                self.plot_losses(gen_loss_history, disc_loss_history, save_plots)
                self.plot_gradients(gen_grads_history, disc_grads_history, save_plots)

            if (epoch + 1) % 40 == 0:
                self.save_checkpoint(epoch + 1)

        history['grads']['gen'], history['grads']['disc'] = gen_grads_history, disc_grads_history
        history['loss']['gen'], history['loss']['disc'] = gen_loss_history, disc_loss_history
        self.history = history

        return history

    def plot_losses(self, gen_loss_history, disc_loss_history, save_plots):
        """Plot per-epoch generator/critic losses; optionally save as PNG."""
        plt.figure()
        plt.plot(gen_loss_history, 'r')
        plt.plot(disc_loss_history, 'b')
        plt.title('Loss history')
        plt.xlabel('Epochs')
        plt.ylabel('Loss')
        plt.legend(['Generator', 'Discriminator'])
        if save_plots:
            plt.savefig('loss_history.png')
        plt.show()

    def plot_gradients(self, gen_grads_history, disc_grads_history, save_plots):
        """Plot per-epoch gradient L2 norms; optionally save as PNG."""
        plt.figure()
        plt.plot(gen_grads_history, 'r')
        plt.plot(disc_grads_history, 'b')
        plt.title('Gradient history')
        plt.xlabel('Epochs')
        plt.ylabel('Gradients (L2 norm)')
        plt.legend(['Generator', 'Discriminator'])
        if save_plots:
            plt.savefig('grad_history.png')
        plt.show()

    def save_checkpoint(self, epoch):
        """Persist both models, the history dict and the epoch counter."""
        # The first save would fail on a missing directory.
        os.makedirs(self.checkpoint_dir, exist_ok=True)
        self.generator.save(os.path.join(self.checkpoint_dir, f'generator_epoch_{epoch}.h5'))
        self.discriminator.save(os.path.join(self.checkpoint_dir, f'discriminator_epoch_{epoch}.h5'))
        np.save(os.path.join(self.checkpoint_dir, 'history.npy'), self.history)
        np.save(os.path.join(self.checkpoint_dir, 'start_epoch.npy'), epoch)

    def load_checkpoint(self):
        """Resume training state from checkpoint_dir, if a snapshot exists.

        Restores the epoch counter, the history dict AND — unlike the
        original implementation — the saved model weights, so resumed runs
        continue from trained networks instead of fresh initialisations.
        """
        epoch_file = os.path.join(self.checkpoint_dir, 'start_epoch.npy')
        if os.path.exists(epoch_file):
            self.start_epoch = int(np.load(epoch_file))
            self.history = np.load(os.path.join(self.checkpoint_dir, 'history.npy'), allow_pickle=True).item()
            # save_checkpoint writes full-model .h5 files; load_weights can
            # read the weights back into the already-built architectures.
            gen_path = os.path.join(self.checkpoint_dir, f'generator_epoch_{self.start_epoch}.h5')
            disc_path = os.path.join(self.checkpoint_dir, f'discriminator_epoch_{self.start_epoch}.h5')
            if os.path.exists(gen_path):
                self.generator.load_weights(gen_path)
            if os.path.exists(disc_path):
                self.discriminator.load_weights(disc_path)
            print(f"Resuming training from epoch {self.start_epoch}")
        else:
            self.start_epoch = 0
            self.history = {'grads': {}, 'loss': {}}

    def signal_handler(self, sig, frame):
        """SIGINT handler: snapshot current progress, then exit.

        Saves current_epoch (the epoch being trained when interrupted)
        rather than the stale start_epoch the original used, so a resumed
        run re-does at most the interrupted epoch.
        """
        print('Training interrupted. Saving checkpoint...')
        self.save_checkpoint(self.current_epoch)
        exit(0)


# Data loading and preprocessing
def load_BCI42_data(dataset_path, data_file):
    """Load one BCI-IV-2a subject file pair and return shuffled (data, label).

    Reads '<data_file>_data.npy' and '<data_file>_label.npy' from
    dataset_path, shifts labels from 1-based to 0-based, and shuffles
    samples and labels with the same random permutation.
    """
    data = np.load(os.path.join(dataset_path, data_file + '_data.npy'))
    label = np.load(os.path.join(dataset_path, data_file + '_label.npy')).squeeze() - 1

    print(data_file, 'load success')

    # Reorder samples and labels in lockstep with one random permutation.
    order = np.random.permutation(data.shape[0])
    data, label = data[order], label[order]

    print('Data shape: ', data.shape)
    print('Label shape: ', label.shape)

    return data, label

if __name__ == "__main__":
    dataset_path = r'D:\EEG-TransNet-main(1)\my-warehouse\EEG\EEG-TransNet-main\data\dataset\bci_iv_2a'
    data_files = ['A01T', 'A02T', 'A03T', 'A04T', 'A05T', 'A06T', 'A07T', 'A08T', 'A09T']

    # Train one WGAN-GP per subject and dump the generated samples to disk.
    for data_file in data_files:
        print(f"Processing {data_file}...")

        # Load this subject's recordings and (0-based) labels.
        data, label = load_BCI42_data(dataset_path, data_file)

        shape_before = data.shape
        print(f"{data_file} original shape:", shape_before)

        # StandardScaler expects 2-D input, so flatten each trial,
        # standardise, then restore the original layout.
        flattened = data.reshape(shape_before[0], -1)
        data = StandardScaler().fit_transform(flattened).reshape(shape_before)

        # One-hot encode the class labels.
        labels = to_categorical(label)

        # Hold out 20% of trials as a test split.
        x_train, x_test, y_train, y_test = train_test_split(data, labels, test_size=0.2, random_state=42)

        # Shape expected by the GAN: (N, 1, 22, 1125).
        x_train = x_train.reshape(-1, 1, 22, 1125)
        x_test = x_test.reshape(-1, 1, 22, 1125)

        print(f"{data_file} adjusted shape of x_train:", x_train.shape)
        print(f"{data_file} adjusted shape of x_test:", x_test.shape)

        # Optimizers: slow Adam for the generator, RMSprop for the critic.
        gen_optim = Adam(1e-5, beta_1=0.5)
        disc_optim = RMSprop(5e-5)

        # One checkpoint directory per subject.
        wgan_gp = WGAN_GP(gen_optim, disc_optim, input_dim=22 * 1125, checkpoint_dir=f'./checkpoints/{data_file}')

        history = wgan_gp.train(x_train, epochs=120, batch_size=32, discriminator_iters=5, gp_weight=10)

        # Draw synthetic samples from the trained generator.
        generated_samples, noise = wgan_gp.generate_fake_data(N=256)

        # Persist generated data and the latent vectors that produced it.
        np.save(f'generated_samples_{data_file}.npy', generated_samples)
        np.save(f'noise_{data_file}.npy', noise)