import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import numpy as np
import os
import time
from datetime import datetime
import matplotlib.pyplot as plt
from tqdm import tqdm
from eegGAN.model import UNetGenerator1D, DualPathDiscriminator
from eegGAN.data import MyDataset


class GANTrainer:
    """Trainer for a conditional GAN that generates EEG signals conditioned on BIS values.

    The discriminator is dual-headed: it returns an adversarial real/fake
    score and a BIS regression prediction; both terms enter the generator
    and discriminator losses.
    """

    def __init__(self, generator, discriminator, device, config):
        """
        Args:
            generator: generator module, called as ``generator(noise, bis)``.
            discriminator: discriminator module returning ``(adv_out, bis_pred)``.
            device: torch device to train on.
            config: training configuration dict. Keys read (defaults in
                parentheses): 'noise_dim' (required), 'g_lr' (0.0002),
                'd_lr' (0.0002), 'betas' ((0.5, 0.999)),
                'bis_loss_weight' (1.0), 'val_interval' (5),
                'save_interval' (10), 'save_dir' ('checkpoints').
        """
        self.generator = generator
        self.discriminator = discriminator
        self.device = device
        self.config = config

        # Separate Adam optimizers for G and D; (0.5, 0.999) betas are the
        # usual choice for GAN training.
        self.g_optimizer = optim.Adam(
            generator.parameters(),
            lr=config.get('g_lr', 0.0002),
            betas=config.get('betas', (0.5, 0.999))
        )

        self.d_optimizer = optim.Adam(
            discriminator.parameters(),
            lr=config.get('d_lr', 0.0002),
            betas=config.get('betas', (0.5, 0.999))
        )

        # BCELoss assumes the discriminator's adversarial head already applies
        # a sigmoid -- NOTE(review): confirm against DualPathDiscriminator.
        self.adversarial_loss = nn.BCELoss()
        # MSE between predicted and true BIS values (auxiliary regression task).
        self.bis_loss = nn.MSELoss()

        # Per-epoch training metrics (one entry per epoch for every key).
        self.train_history = {
            'g_loss': [],
            'd_loss': [],
            'd_real_loss': [],
            'd_fake_loss': [],
            'bis_loss': [],
            'g_adv_loss': [],
            'g_bis_loss': []
        }

        # Metrics recorded once every 'val_interval' epochs.
        self.val_history = {
            'g_loss': [],
            'd_loss': [],
            'bis_mae': []
        }

        # Best validation losses seen so far. best_d_loss is tracked but not
        # currently used for model selection.
        self.best_g_loss = float('inf')
        self.best_d_loss = float('inf')

        # Checkpoint directory, created eagerly.
        self.save_dir = config.get('save_dir', 'checkpoints')
        os.makedirs(self.save_dir, exist_ok=True)

        # Wall-clock start, used for the ETA estimate during training.
        self.start_time = time.time()

    def train(self, train_loader, val_loader=None, num_epochs=100):
        """Run the full training loop.

        Args:
            train_loader: DataLoader yielding (eeg, bis) batches.
            val_loader: optional DataLoader for periodic validation.
            num_epochs: number of epochs to train.
        """
        print(f"开始训练GAN模型，共{num_epochs}个epochs")
        print(f"设备: {self.device}")
        print(f"保存目录: {self.save_dir}")

        for epoch in range(num_epochs):
            # Train one epoch (detailed losses are appended to train_history
            # inside _train_epoch).
            epoch_g_loss, epoch_d_loss = self._train_epoch(train_loader, epoch, num_epochs)

            # Append the aggregate losses BEFORE any checkpoint is written.
            # Previously these were appended after _save_checkpoint, so every
            # saved history was one epoch short for 'g_loss'/'d_loss' while
            # the detailed keys were already up to date.
            self.train_history['g_loss'].append(epoch_g_loss)
            self.train_history['d_loss'].append(epoch_d_loss)

            # Periodic validation.
            if val_loader is not None and (epoch + 1) % self.config.get('val_interval', 5) == 0:
                val_g_loss, val_d_loss, bis_mae = self.validate(val_loader)
                self.val_history['g_loss'].append(val_g_loss)
                self.val_history['d_loss'].append(val_d_loss)
                self.val_history['bis_mae'].append(bis_mae)

                print(f"验证结果 - G_loss: {val_g_loss:.4f}, D_loss: {val_d_loss:.4f}, BIS_MAE: {bis_mae:.4f}")

                # Keep the checkpoint with the best validation generator loss.
                if val_g_loss < self.best_g_loss:
                    self.best_g_loss = val_g_loss
                    self._save_checkpoint(epoch, is_best=True)
                    print(f"保存最佳生成器模型，epoch {epoch + 1}")

            # Periodic checkpointing.
            if (epoch + 1) % self.config.get('save_interval', 10) == 0:
                self._save_checkpoint(epoch)
                print(f"保存检查点，epoch {epoch + 1}")

            # Progress report with an ETA based on the mean epoch duration.
            elapsed_time = time.time() - self.start_time
            eta = elapsed_time / (epoch + 1) * (num_epochs - epoch - 1)
            print(
                f"Epoch {epoch + 1}/{num_epochs} - G_loss: {epoch_g_loss:.4f}, D_loss: {epoch_d_loss:.4f} - ETA: {eta / 60:.1f}分钟")

    def _train_epoch(self, train_loader, epoch, num_epochs):
        """Train both networks for one epoch; return (mean_g_loss, mean_d_loss).

        Also appends the per-epoch detailed losses (d_real/d_fake/bis/g_adv/
        g_bis) to ``self.train_history``.
        """
        self.generator.train()
        self.discriminator.train()

        epoch_g_loss = 0
        epoch_d_loss = 0
        epoch_d_real_loss = 0
        epoch_d_fake_loss = 0
        epoch_bis_loss = 0
        epoch_g_adv_loss = 0
        epoch_g_bis_loss = 0

        batch_count = 0

        progress_bar = tqdm(train_loader, desc=f'Epoch {epoch + 1}/{num_epochs}')

        for batch_idx, (real_eeg, real_bis) in enumerate(progress_bar):
            batch_size = real_eeg.size(0)

            # Move the batch to the training device.
            real_eeg = real_eeg.to(self.device)
            real_bis = real_bis.to(self.device)

            # Adversarial targets: 1 for real samples, 0 for generated ones.
            real_labels = torch.ones(batch_size, 1, device=self.device)
            fake_labels = torch.zeros(batch_size, 1, device=self.device)

            # ---------------------
            #  Train the discriminator
            # ---------------------
            self.d_optimizer.zero_grad()

            # Real samples: adversarial loss plus BIS regression loss.
            # real_bis is assumed 1-D (batch,) -- unsqueeze matches the
            # discriminator's (batch, 1) prediction shape.
            real_adv_out, real_bis_pred = self.discriminator(real_eeg)
            d_real_loss = self.adversarial_loss(real_adv_out, real_labels)
            d_bis_loss = self.bis_loss(real_bis_pred, real_bis.unsqueeze(1))

            # Generated samples; detach so G receives no gradient from D's step.
            noise = torch.randn(batch_size, self.config['noise_dim'], device=self.device)
            fake_eeg = self.generator(noise, real_bis)
            fake_adv_out, _ = self.discriminator(fake_eeg.detach())
            d_fake_loss = self.adversarial_loss(fake_adv_out, fake_labels)

            # Total discriminator loss (BIS term weighted by config).
            d_loss = d_real_loss + d_fake_loss + d_bis_loss * self.config.get('bis_loss_weight', 1.0)
            d_loss.backward()
            self.d_optimizer.step()

            # ---------------------
            #  Train the generator
            # ---------------------
            self.g_optimizer.zero_grad()

            # Non-saturating G objective: fool D (real_labels) and make the
            # generated signal carry the conditioning BIS value.
            fake_adv_out, fake_bis_pred = self.discriminator(fake_eeg)
            g_adv_loss = self.adversarial_loss(fake_adv_out, real_labels)
            g_bis_loss = self.bis_loss(fake_bis_pred, real_bis.unsqueeze(1))

            # Total generator loss.
            g_loss = g_adv_loss + g_bis_loss * self.config.get('bis_loss_weight', 1.0)
            g_loss.backward()
            self.g_optimizer.step()

            # Accumulate batch losses for the epoch means.
            epoch_g_loss += g_loss.item()
            epoch_d_loss += d_loss.item()
            epoch_d_real_loss += d_real_loss.item()
            epoch_d_fake_loss += d_fake_loss.item()
            epoch_bis_loss += d_bis_loss.item()
            epoch_g_adv_loss += g_adv_loss.item()
            epoch_g_bis_loss += g_bis_loss.item()
            batch_count += 1

            # Live loss readout on the progress bar.
            progress_bar.set_postfix({
                'G_loss': f'{g_loss.item():.4f}',
                'D_loss': f'{d_loss.item():.4f}'
            })

        # Convert accumulated sums to per-batch means.
        epoch_g_loss /= batch_count
        epoch_d_loss /= batch_count
        epoch_d_real_loss /= batch_count
        epoch_d_fake_loss /= batch_count
        epoch_bis_loss /= batch_count
        epoch_g_adv_loss /= batch_count
        epoch_g_bis_loss /= batch_count

        # Record the detailed losses (aggregate g/d losses are recorded by
        # the caller in train()).
        self.train_history['d_real_loss'].append(epoch_d_real_loss)
        self.train_history['d_fake_loss'].append(epoch_d_fake_loss)
        self.train_history['bis_loss'].append(epoch_bis_loss)
        self.train_history['g_adv_loss'].append(epoch_g_adv_loss)
        self.train_history['g_bis_loss'].append(epoch_g_bis_loss)

        return epoch_g_loss, epoch_d_loss

    def validate(self, val_loader):
        """Evaluate both networks on the validation set.

        Returns:
            Tuple of per-batch means: (g_loss, d_loss, bis_mae), where
            bis_mae is the MAE of the discriminator's BIS prediction on
            generated samples.
        """
        self.generator.eval()
        self.discriminator.eval()

        total_g_loss = 0
        total_d_loss = 0
        total_bis_mae = 0
        batch_count = 0

        with torch.no_grad():
            for real_eeg, real_bis in val_loader:
                batch_size = real_eeg.size(0)

                # Move the batch to the evaluation device.
                real_eeg = real_eeg.to(self.device)
                real_bis = real_bis.to(self.device)

                # Adversarial targets, as in training.
                real_labels = torch.ones(batch_size, 1, device=self.device)
                fake_labels = torch.zeros(batch_size, 1, device=self.device)

                # Discriminator on real samples.
                real_adv_out, real_bis_pred = self.discriminator(real_eeg)
                d_real_loss = self.adversarial_loss(real_adv_out, real_labels)
                d_bis_loss = self.bis_loss(real_bis_pred, real_bis.unsqueeze(1))

                # Generate conditioned samples and evaluate the discriminator.
                noise = torch.randn(batch_size, self.config['noise_dim'], device=self.device)
                fake_eeg = self.generator(noise, real_bis)
                fake_adv_out, fake_bis_pred = self.discriminator(fake_eeg)

                # Discriminator on generated samples.
                d_fake_loss = self.adversarial_loss(fake_adv_out, fake_labels)

                # Generator objective on the same outputs.
                g_adv_loss = self.adversarial_loss(fake_adv_out, real_labels)
                g_bis_loss = self.bis_loss(fake_bis_pred, real_bis.unsqueeze(1))

                # Totals mirror the training losses.
                d_loss = d_real_loss + d_fake_loss + d_bis_loss * self.config.get('bis_loss_weight', 1.0)
                g_loss = g_adv_loss + g_bis_loss * self.config.get('bis_loss_weight', 1.0)

                # MAE of the BIS prediction for generated samples.
                bis_mae = torch.abs(fake_bis_pred - real_bis.unsqueeze(1)).mean().item()

                total_g_loss += g_loss.item()
                total_d_loss += d_loss.item()
                total_bis_mae += bis_mae
                batch_count += 1

        return total_g_loss / batch_count, total_d_loss / batch_count, total_bis_mae / batch_count

    def _save_checkpoint(self, epoch, is_best=False):
        """Serialize models, optimizers, histories and config to disk.

        Args:
            epoch: zero-based epoch index; stored/exposed as ``epoch + 1``.
            is_best: also overwrite the rolling 'best_model.pth' file.
        """
        checkpoint = {
            'epoch': epoch + 1,
            'generator_state_dict': self.generator.state_dict(),
            'discriminator_state_dict': self.discriminator.state_dict(),
            'g_optimizer_state_dict': self.g_optimizer.state_dict(),
            'd_optimizer_state_dict': self.d_optimizer.state_dict(),
            'train_history': self.train_history,
            'val_history': self.val_history,
            'config': self.config
        }

        if is_best:
            filename = f'best_model_epoch_{epoch + 1}.pth'
        else:
            filename = f'checkpoint_epoch_{epoch + 1}.pth'

        filepath = os.path.join(self.save_dir, filename)
        torch.save(checkpoint, filepath)

        # Additionally keep a fixed-name copy of the current best model.
        if is_best:
            best_filepath = os.path.join(self.save_dir, 'best_model.pth')
            torch.save(checkpoint, best_filepath)

    def load_checkpoint(self, checkpoint_path):
        """Restore models, optimizers and (if present) histories from a checkpoint.

        Returns:
            The 1-based epoch number stored in the checkpoint.
        """
        checkpoint = torch.load(checkpoint_path, map_location=self.device)

        self.generator.load_state_dict(checkpoint['generator_state_dict'])
        self.discriminator.load_state_dict(checkpoint['discriminator_state_dict'])
        self.g_optimizer.load_state_dict(checkpoint['g_optimizer_state_dict'])
        self.d_optimizer.load_state_dict(checkpoint['d_optimizer_state_dict'])

        # Histories are optional for backward compatibility with older files.
        if 'train_history' in checkpoint:
            self.train_history = checkpoint['train_history']
        if 'val_history' in checkpoint:
            self.val_history = checkpoint['val_history']

        print(f"加载检查点: {checkpoint_path}, epoch: {checkpoint['epoch']}")
        return checkpoint['epoch']

    def plot_training_history(self, save_path=None):
        """Plot training/validation curves in a 2x2 grid; optionally save to file."""
        fig, axes = plt.subplots(2, 2, figsize=(15, 10))

        # Aggregate generator/discriminator loss per epoch.
        epochs = range(1, len(self.train_history['g_loss']) + 1)

        axes[0, 0].plot(epochs, self.train_history['g_loss'], label='Generator Loss')
        axes[0, 0].plot(epochs, self.train_history['d_loss'], label='Discriminator Loss')
        axes[0, 0].set_title('Generator and Discriminator Loss')
        axes[0, 0].set_xlabel('Epoch')
        axes[0, 0].set_ylabel('Loss')
        axes[0, 0].legend()
        axes[0, 0].grid(True)

        # Discriminator loss components.
        axes[0, 1].plot(epochs, self.train_history['d_real_loss'], label='D Real Loss')
        axes[0, 1].plot(epochs, self.train_history['d_fake_loss'], label='D Fake Loss')
        axes[0, 1].plot(epochs, self.train_history['bis_loss'], label='BIS Loss')
        axes[0, 1].set_title('Discriminator Detailed Loss')
        axes[0, 1].set_xlabel('Epoch')
        axes[0, 1].set_ylabel('Loss')
        axes[0, 1].legend()
        axes[0, 1].grid(True)

        # Generator loss components.
        axes[1, 0].plot(epochs, self.train_history['g_adv_loss'], label='G Adv Loss')
        axes[1, 0].plot(epochs, self.train_history['g_bis_loss'], label='G BIS Loss')
        axes[1, 0].set_title('Generator Detailed Loss')
        axes[1, 0].set_xlabel('Epoch')
        axes[1, 0].set_ylabel('Loss')
        axes[1, 0].legend()
        axes[1, 0].grid(True)

        # Validation metrics. Validation runs every 'val_interval' epochs, so
        # place the points at their true epoch positions (previously they were
        # plotted at 1..N, compressing the x-axis).
        if self.val_history['g_loss']:
            val_interval = self.config.get('val_interval', 5)
            val_epochs = [val_interval * (i + 1) for i in range(len(self.val_history['g_loss']))]
            axes[1, 1].plot(val_epochs, self.val_history['g_loss'], label='Val G Loss')
            axes[1, 1].plot(val_epochs, self.val_history['d_loss'], label='Val D Loss')
            axes[1, 1].plot(val_epochs, self.val_history['bis_mae'], label='Val BIS MAE')
            axes[1, 1].set_title('Validation Metrics')
            axes[1, 1].set_xlabel('Epoch')
            axes[1, 1].set_ylabel('Value')
            axes[1, 1].legend()
            axes[1, 1].grid(True)

        plt.tight_layout()

        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight')

        plt.show()

    def generate_samples(self, num_samples, bis_values=None):
        """Generate EEG samples from random noise, conditioned on BIS values.

        Args:
            num_samples: number of samples to generate.
            bis_values: conditioning BIS values as a tensor, list or ndarray;
                drawn uniformly from [0, 100) when None.

        Returns:
            Generated EEG signals as a CPU tensor.
        """
        self.generator.eval()

        if bis_values is None:
            bis_values = torch.rand(num_samples, 1) * 100  # random BIS in [0, 100)
        elif not torch.is_tensor(bis_values):
            # Accept plain lists/arrays; previously .to() crashed on non-tensors.
            bis_values = torch.as_tensor(bis_values, dtype=torch.float32)

        # NOTE(review): training conditions the generator on 1-D bis while this
        # default produces shape (n, 1) -- confirm the generator handles both.
        bis_values = bis_values.to(self.device)
        noise = torch.randn(num_samples, self.config['noise_dim'], device=self.device)

        with torch.no_grad():
            generated_eeg = self.generator(noise, bis_values)

        return generated_eeg.cpu()


# Usage example
if __name__ == "__main__":
    # Hyper-parameters and bookkeeping options for the trainer.
    config = {
        'noise_dim': 128,
        'g_lr': 0.0002,
        'd_lr': 0.0002,
        'betas': (0.5, 0.999),
        'bis_loss_weight': 1.0,
        'val_interval': 5,
        'save_interval': 10,
        'save_dir': 'gan_checkpoints'
    }

    # Prefer GPU when available, otherwise fall back to CPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # Build the generator/discriminator pair on the chosen device.
    generator = UNetGenerator1D(
        noise_dim=config['noise_dim'],
        bis_dim=1,
        output_dim=256,
        hidden_dim=64
    ).to(device)
    discriminator = DualPathDiscriminator(
        input_dim=256,
        hidden_dim=64,
        bis_prediction=True
    ).to(device)

    # Wire everything into the trainer.
    trainer = GANTrainer(generator, discriminator, device, config)

    # Training and validation data loaders.
    train_loader = DataLoader(MyDataset(), batch_size=64, shuffle=True)
    val_loader = DataLoader(MyDataset(200), batch_size=64, shuffle=False)

    # Train, plot the loss curves, then sample a few generated signals.
    trainer.train(train_loader, val_loader, num_epochs=5000)
    trainer.plot_training_history('training_history.png')

    samples = trainer.generate_samples(5)
    print(f"生成的样本形状: {samples.shape}")