import os
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import pandas as pd  # NOTE(review): imported but not used in this file — may be needed by the imported model modules; verify before removing
import matplotlib.pyplot as plt
# Configure matplotlib fonts so CJK (Chinese) labels render correctly
import matplotlib
matplotlib.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans', 'Arial Unicode MS', 'sans-serif']
matplotlib.rcParams['axes.unicode_minus'] = False
plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans', 'Arial Unicode MS', 'sans-serif']
plt.rcParams['axes.unicode_minus'] = False

from torch.utils.data import DataLoader
import time
import argparse
import sys

# Make the trainCode directory importable (model modules live there)
sys.path.append('trainCode')

# Import all candidate model architectures plus the shared dataset class
from RAE_GCN_Transformer_BSM1 import RAE_GCN_Transformer, BSM1Dataset
from Transformer_BSM1 import Transformer
from GCN_BSM1 import GCN
from GCN_Transformer_BSM1 import GCN_Transformer
from RAE_Transformer_BSM1 import RAE_Transformer

def train_model(model_class, model_name, device, epochs=200, batch_size=512, learning_rate=5e-4, weight_decay=4e-5, seq_len=10, connectivity_ratio=0.5):
    """Train one model on the BSM1 dataset, saving the best checkpoint and metrics.

    The best model (by validation accuracy) is written to
    ``model/model_{model_name}_BSM1.pth``; per-epoch loss/accuracy arrays and a
    training-curve figure are written under ``model/`` as well.

    Args:
        model_class: Model constructor. Called with ``input_dim``, ``seq_len`` and
            ``num_classes`` keyword arguments (plus ``connectivity_ratio`` when
            ``model_name == 'RAE_GCN_Transformer'``).
        model_name: Name used in checkpoint/metric/figure file names.
        device: ``torch.device`` to train on.
        epochs: Number of training epochs.
        batch_size: Mini-batch size for both the train and validation loaders.
        learning_rate: Adam learning rate.
        weight_decay: Adam L2 weight-decay coefficient.
        seq_len: Sequence length passed to the dataset and the model.
        connectivity_ratio: AdaptiveSparseAttention connection density
            (RAE_GCN_Transformer only).

    Returns:
        float: Best validation accuracy observed over all epochs.

    Raises:
        ValueError: If the model exposes ``embed_dim``/``num_heads`` and
            ``embed_dim`` is not divisible by ``num_heads``.
    """
    print(f"\n开始训练 {model_name} 模型...")
    if model_name == 'RAE_GCN_Transformer':
        print(f"使用AdaptiveSparseAttention，连接密度: {connectivity_ratio}")

    # Ensure the output directory for checkpoints/metrics exists.
    os.makedirs('model', exist_ok=True)

    # Load the full training dataset.
    train_data = BSM1Dataset('BSM1_WWTP_data/train_data.csv', seq_len=seq_len)

    # Hold out 20% of the training data for validation.
    # NOTE(review): random_split is unseeded, so the split differs between runs.
    train_size = int(len(train_data) * 0.8)
    val_size = len(train_data) - train_size
    train_dataset, val_dataset = torch.utils.data.random_split(train_data, [train_size, val_size])

    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)

    # Model dimensions derived from the dataset (assumes the dataset exposes
    # .data as a 2-D array and .labels as integer class ids — per BSM1Dataset).
    input_dim = train_data.data.shape[1]
    num_classes = len(np.unique(train_data.labels))

    # Instantiate the model; only RAE_GCN_Transformer accepts connectivity_ratio.
    if model_name == 'RAE_GCN_Transformer':
        model = model_class(input_dim=input_dim, seq_len=seq_len, num_classes=num_classes,
                            connectivity_ratio=connectivity_ratio).to(device)
    else:
        model = model_class(input_dim=input_dim, seq_len=seq_len, num_classes=num_classes).to(device)

    # Sanity-check attention dimensions where the model exposes them.
    if hasattr(model, 'embed_dim') and hasattr(model, 'num_heads'):
        # Direct attribute access — getattr after hasattr was redundant.
        embed_dim = model.embed_dim
        num_heads = model.num_heads
        if embed_dim % num_heads != 0:
            raise ValueError(f"模型 {model_name} 的 embed_dim({embed_dim}) 不能被 num_heads({num_heads}) 整除")
        print(f"模型参数检查：embed_dim({embed_dim}) 可以被 num_heads({num_heads}) 整除")

    # Optimizer and classification loss.
    optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    criterion = nn.CrossEntropyLoss()

    # Per-epoch training history.
    loss_train = []
    loss_val = []
    acc_train = []
    acc_val = []
    best_val_acc = 0.0
    training_time = 0

    # Main training loop.
    start_time = time.time()
    for epoch in range(epochs):
        epoch_start = time.time()

        # ---- Training phase ----
        model.train()
        train_loss = 0.0
        train_correct = 0
        train_total = 0

        for x, y in train_loader:
            x = x.to(device)
            y = y.to(device).long()

            optimizer.zero_grad()

            # Forward pass
            out = model(x)

            # Loss
            loss = criterion(out, y)

            # Backward pass and parameter update
            loss.backward()
            optimizer.step()

            train_loss += loss.item()

            # Accuracy bookkeeping (argmax avoids deprecated .data access).
            predicted = out.argmax(dim=1)
            train_total += y.size(0)
            train_correct += (predicted == y).sum().item()

        train_acc = train_correct / train_total

        # ---- Validation phase ----
        model.eval()
        val_loss = 0.0
        val_correct = 0
        val_total = 0

        with torch.no_grad():
            for x, y in val_loader:
                x = x.to(device)
                y = y.to(device).long()

                out = model(x)
                loss = criterion(out, y)

                val_loss += loss.item()

                # Accuracy bookkeeping
                predicted = out.argmax(dim=1)
                val_total += y.size(0)
                val_correct += (predicted == y).sum().item()

        val_acc = val_correct / val_total

        # Record epoch metrics (losses averaged per batch).
        loss_train.append(train_loss / len(train_loader))
        loss_val.append(val_loss / len(val_loader))
        acc_train.append(train_acc)
        acc_val.append(val_acc)

        # Checkpoint whenever validation accuracy improves.
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            torch.save(model.state_dict(), f'model/model_{model_name}_BSM1.pth')
            print(f"保存最佳模型，验证准确率: {val_acc:.4f}")

        # Epoch timing.
        epoch_time = time.time() - epoch_start
        training_time += epoch_time

        # Progress report every 10 epochs.
        if (epoch + 1) % 10 == 0:
            print(f"Epoch {epoch+1}/{epochs}: "
                  f"训练损失={train_loss/len(train_loader):.4f}, 训练准确率={train_acc:.4f}, "
                  f"验证损失={val_loss/len(val_loader):.4f}, 验证准确率={val_acc:.4f}, "
                  f"耗时={epoch_time:.2f}秒")

    # Total wall-clock training time.
    total_time = time.time() - start_time
    print(f"\n{model_name} 训练完成，总耗时: {total_time:.2f}秒, 平均每轮: {training_time/epochs:.2f}秒")
    print(f"最佳验证集准确率: {best_val_acc:.4f}")

    # Persist per-epoch metric histories.
    np.save(f'model/acc_train_{model_name}_BSM1.npy', np.array(acc_train))
    np.save(f'model/acc_val_{model_name}_BSM1.npy', np.array(acc_val))
    np.save(f'model/loss_train_{model_name}_BSM1.npy', np.array(loss_train))
    np.save(f'model/loss_val_{model_name}_BSM1.npy', np.array(loss_val))

    # Plot loss and accuracy curves side by side.
    plt.figure(figsize=(12, 5))

    plt.subplot(1, 2, 1)
    plt.plot(loss_train, label='训练损失')
    plt.plot(loss_val, label='验证损失')
    plt.xlabel('训练轮次')
    plt.ylabel('损失')
    plt.title(f'{model_name} 损失曲线')
    plt.legend()
    plt.grid(True)

    plt.subplot(1, 2, 2)
    plt.plot(acc_train, label='训练准确率')
    plt.plot(acc_val, label='验证准确率')
    plt.xlabel('训练轮次')
    plt.ylabel('准确率')
    plt.title(f'{model_name} 准确率曲线')
    plt.legend()
    plt.grid(True)

    plt.tight_layout()
    plt.savefig(f'model/training_curves_{model_name}.png', dpi=300)
    # Close the figure: main() calls this function once per model, and without
    # plt.close() the figures accumulate in memory across calls.
    plt.close()

    return best_val_acc

def main():
    """CLI entry point: parse arguments, then train every selected model."""
    parser = argparse.ArgumentParser(description='训练深度学习模型')
    parser.add_argument('--models', nargs='+', choices=['all', 'RAE_GCN_Transformer', 'Transformer', 'GCN', 'GCN_Transformer', 'RAE_Transformer'],
                        default=['all'], help='指定要训练的模型，可以是单个模型或多个模型')
    parser.add_argument('--epochs', type=int, default=100, help='训练轮次')
    parser.add_argument('--batch_size', type=int, default=512, help='批次大小')
    parser.add_argument('--lr', type=float, default=5e-4, help='学习率')
    parser.add_argument('--wd', type=float, default=4e-5, help='权重衰减')
    parser.add_argument('--seq_len', type=int, default=10, help='序列长度')
    parser.add_argument('--no_gpu', action='store_true', help='不使用GPU')
    parser.add_argument('--connectivity_ratio', type=float, default=0.5, help='AdaptiveSparseAttention的连接密度 (0.3-0.7, 推荐0.5)')

    args = parser.parse_args()

    # Use the CPU when explicitly requested or when CUDA is not available.
    if args.no_gpu or not torch.cuda.is_available():
        device = torch.device('cpu')
    else:
        device = torch.device('cuda')
    print(f"使用设备: {device}")
    print(f"RAE-GCN-Transformer使用AdaptiveSparseAttention，连接密度: {args.connectivity_ratio}")

    # Registry mapping CLI model names to their classes.
    registry = {
        'RAE_GCN_Transformer': RAE_GCN_Transformer,
        'Transformer': Transformer,
        'GCN': GCN,
        'GCN_Transformer': GCN_Transformer,
        'RAE_Transformer': RAE_Transformer,
    }

    # 'all' expands to every registered model; otherwise train exactly what was asked for.
    if 'all' in args.models:
        selected = list(registry)
    else:
        selected = args.models

    # Train each selected model in turn, collecting its best validation accuracy.
    best_scores = {}
    for name in selected:
        best_scores[name] = train_model(
            model_class=registry[name],
            model_name=name,
            device=device,
            epochs=args.epochs,
            batch_size=args.batch_size,
            learning_rate=args.lr,
            weight_decay=args.wd,
            seq_len=args.seq_len,
            connectivity_ratio=args.connectivity_ratio,
        )

    # Summary of results for all trained models.
    print("\n训练完成，所有模型的最佳验证准确率:")
    print("-" * 40)
    for name, score in best_scores.items():
        print(f"{name}: {score:.4f}")
    print("-" * 40)

# Run the CLI only when executed as a script (not when imported as a module).
if __name__ == '__main__':
    main() 