import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
import os
import numpy as np
import matplotlib.pyplot as plt
import yaml
import json  # 新增导入
import time
from utils.freq import compute_spectrum
from models.condition_diffusion_model import ConditionedDDPM, DDPMTrainer
from data.bjut import single_domain_dataloader, load_processed_data


def train_ddpm():
    """Train the label-conditioned DDPM on a single domain.

    Reads hyperparameters from ``config/train_ddpm.yaml`` (under the
    ``train_ddpm`` key), builds the dataloader and model, optionally resumes
    from a checkpoint listed in the config, and trains with PyTorch
    Lightning. Checkpoints, the final raw weights, and a JSON copy of the
    resolved config are written to a per-experiment directory.
    """
    # 1. Load configuration.
    with open('config/train_ddpm.yaml', 'r', encoding='utf-8') as f:
        config = yaml.safe_load(f)
    config = config['train_ddpm']

    # 2. Build an experiment name that encodes the key hyperparameters.
    #    NOTE: lr is embedded here exactly as parsed from YAML (it is cast
    #    to float only later, before use), so directory names keep the
    #    config's original spelling, e.g. "1e-4".
    exp_name = (f"{config['domain_name']}_"
                f"lr{config['lr']}_"
                f"bs{config['batch_size']}_"
                f"ep{config['num_epochs']}_"
                f"seed{config['seed']}_"
                f"{time.strftime('%Y%m%d_%H%M%S')}")

    # 3. Create the per-run output directory.
    save_dir = os.path.join(config['save_dir'], exp_name)
    os.makedirs(save_dir, exist_ok=True)

    # 4. Persist the resolved config next to the checkpoints so the run can
    #    be reproduced later (this also makes the top-level `json` import
    #    actually used).
    config_save_path = os.path.join(save_dir, 'config.json')
    with open(config_save_path, 'w', encoding='utf-8') as f:
        json.dump(config, f, indent=4, ensure_ascii=False)
    print(f"配置已保存到: {config_save_path}")

    # 5. Data.
    train_loader = single_domain_dataloader(
        data_path=config['data_path'],
        domain_name=config['domain_name'],
        batch_size=config['batch_size']
    )

    # 6. Model — label conditioning only, feature conditioning disabled.
    ddpm = ConditionedDDPM(
        num_classes=config['num_classes'],
        cond_dim=config['cond_dim'],
        timesteps=config['timesteps'],
        use_features=False
    )

    # 7. Trainer wrapper. YAML may parse scientific-notation learning rates
    #    (e.g. "1e-4") as a string, so normalize to float before use.
    config['lr'] = float(config['lr'])
    trainer_model = DDPMTrainer(ddpm, lr=config['lr'])

    # 8. Optionally resume from a checkpoint path given in the config.
    if config['continue_train']:
        print(f"从检查点加载模型: {config['checkpoint_path']}")
        # map_location='cpu' keeps the load working on CPU-only machines;
        # Lightning moves parameters to the training device afterwards.
        checkpoint = torch.load(config['checkpoint_path'], map_location='cpu')
        trainer_model.load_state_dict(checkpoint['state_dict'])

    # 9. Callbacks and logging.
    checkpoint_callback = ModelCheckpoint(
        dirpath=save_dir,
        filename='ddpm-{epoch:02d}-{train_loss:.4f}',
        save_top_k=3,
        monitor='loss/train_loss',
        mode='min'
    )
    logger = TensorBoardLogger(
        save_dir=config['log_dir'],
        name=exp_name,  # same experiment name as the checkpoint directory
        version=None    # let Lightning create a version sub-directory
    )
    logger.log_hyperparams(config)

    # 10. PyTorch Lightning trainer.
    trainer = pl.Trainer(
        max_epochs=config['num_epochs'],
        accelerator='gpu',  # train on GPU
        devices=[0],        # first GPU; adjust as needed
        callbacks=[checkpoint_callback],
        logger=logger,
        log_every_n_steps=10,
        precision=16        # mixed precision for speed/memory
    )

    # 11. Train.
    trainer.fit(trainer_model, train_loader)

    # 12. Save the raw DDPM weights (without the Lightning wrapper) for
    #     downstream sampling via generate_samples().
    final_model_path = os.path.join(save_dir, 'final_model.pt')
    torch.save(ddpm.state_dict(), final_model_path)

def generate_samples(checkpoint_path, num_samples=10, output_dir="generated_samples"):
    """Generate per-class samples from a trained DDPM checkpoint.

    Args:
        checkpoint_path: Path to a ``state_dict`` file saved from
            :class:`ConditionedDDPM` (e.g. ``final_model.pt``).
        num_samples: Number of samples to generate for each class.
        output_dir: Directory where the per-class sample tensors are
            written as ``class_<label>.pt`` files.
    """
    # 0. Make sure the output directory exists.
    os.makedirs(output_dir, exist_ok=True)

    # 1. Model hyperparameters — these must match the checkpoint's
    #    architecture. TODO(review): read them from the run's saved
    #    config.json instead of hard-coding them here.
    config = {
        'num_classes': 5,
        'cond_dim': 256,
        'timesteps': 1500
    }

    # Prefer the first GPU but fall back to CPU so the script does not
    # crash on machines without CUDA.
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    ddpm = ConditionedDDPM(
        num_classes=config['num_classes'],
        cond_dim=config['cond_dim'],
        timesteps=config['timesteps'],
        use_features=False
    )

    # map_location lets a GPU-trained checkpoint load onto the chosen device.
    ddpm.load_state_dict(torch.load(checkpoint_path, map_location=device))
    ddpm = ddpm.to(device)
    ddpm.eval()

    # 2. Generate num_samples signals for every class label.
    with torch.no_grad():
        for label in range(config['num_classes']):
            # Condition: a batch of identical class labels.
            labels = torch.full((num_samples,), label, device=device)

            # Run the reverse diffusion process.
            samples, _ = ddpm.sample(labels)

            # Persist on CPU so the files load anywhere.
            save_path = os.path.join(output_dir, f"class_{label}.pt")
            torch.save(samples.cpu(), save_path)
            print(f"Generated {num_samples} samples for class {label}")

def analyze_spectra(data_path='data/train_data/BJUT_1024',
                    domain_name='bjut_speed_20',
                    num_classes=5,
                    gen_dir='generated_samples'):
    """Compare average frequency spectra of real vs. generated samples.

    For each class, averages the magnitude spectra of the real signals and
    of the generated signals, computes their Pearson correlation, and saves
    a stacked comparison plot to ``spectrum_comparison.png``.

    Args:
        data_path: Root of the processed real dataset.
        domain_name: Domain key to load from the processed data.
        num_classes: Number of fault classes to compare.
        gen_dir: Directory holding ``class_<label>.pt`` files produced by
            :func:`generate_samples`.
    """
    # 1. Load real samples and split them by class label.
    domains = load_processed_data(data_path, [domain_name])
    all_data = domains[domain_name]['data']
    all_labels = domains[domain_name]['labels']
    real_samples = {label: all_data[all_labels == label]
                    for label in range(num_classes)}

    # 2. Load the previously generated samples.
    generated_samples = {}
    for label in range(num_classes):
        samples = torch.load(os.path.join(gen_dir, f"class_{label}.pt"))
        generated_samples[label] = samples.numpy()

    # 3. Spectrum analysis — figure height scales with the class count
    #    (default num_classes=5 reproduces the original 15x20 figure).
    plt.figure(figsize=(15, 4 * num_classes))
    for label in range(num_classes):
        # Average magnitude spectrum of the real signals.
        real_spectra = []
        for signal in real_samples[label]:
            freq, mag = compute_spectrum(signal.squeeze())
            real_spectra.append(mag)
        real_avg = np.mean(real_spectra, axis=0)

        # Average magnitude spectrum of the generated signals.
        gen_spectra = []
        for sample in generated_samples[label]:
            signal = sample.squeeze()
            freq, mag = compute_spectrum(signal)
            gen_spectra.append(mag)
        gen_avg = np.mean(gen_spectra, axis=0)

        # Pearson correlation between the two averaged spectra.
        correlation = np.corrcoef(real_avg, gen_avg)[0, 1]

        # One comparison subplot per class.
        plt.subplot(num_classes, 1, label + 1)
        plt.plot(freq[:len(real_avg)], real_avg, label='Real')
        plt.plot(freq[:len(gen_avg)], gen_avg, label='Generated')
        plt.title(f'Class {label} - Correlation: {correlation:.2f}')
        plt.legend()

    plt.tight_layout()
    plt.savefig('spectrum_comparison.png')
    print("频谱对比图已保存为 spectrum_comparison.png")

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', type=str,
                        choices=['train', 'generate', 'analyze'],
                        default='train')
    # NOTE: resuming training reads its checkpoint path from the YAML
    # config inside train_ddpm(); --checkpoint is consumed only by the
    # 'generate' mode, so the help text says so.
    parser.add_argument('--checkpoint', type=str,
                        help='Checkpoint path (required for generate mode)')
    parser.add_argument('--num_samples', type=int, default=10,
                        help='Number of samples to generate per class')
    args = parser.parse_args()

    if args.mode == 'train':
        train_ddpm()
    elif args.mode == 'generate':
        if args.checkpoint is None:
            raise ValueError("Please provide checkpoint path for generation")
        generate_samples(args.checkpoint, args.num_samples)
    elif args.mode == 'analyze':
        analyze_spectra()