import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import pandas as pd
import numpy as np
import os
import time

from config import MODEL_CONFIG, TRAIN_CONFIG, DATA_CONFIG, SAVE_CONFIG
from models.lstm_model import create_model
from utils import load_and_preprocess_data, load_preprocessed_dataset, plot_training_history, evaluate_model, save_model

def _compute_class_weights(labels):
    """Compute inverse-frequency weights for binary class labels.

    Args:
        labels: 1-D tensor of class labels in {0, 1} (cast to long for
            counting, so float-encoded labels are accepted).

    Returns:
        Float tensor of shape (2,): ``total / (2 * count_k)`` per class,
        giving the minority class a proportionally larger weight.
    """
    # minlength=2 guarantees both class slots exist even if one class is
    # absent from the labels; clamp(min=1) avoids division by zero in
    # that degenerate case.
    class_counts = torch.bincount(labels.long(), minlength=2)
    total_samples = float(len(labels))
    class_weights = total_samples / (2.0 * class_counts.clamp(min=1).float())

    print(f"类别计数: {class_counts}")
    print(f"类别权重: {class_weights}")
    return class_weights


def _run_epoch(model, loader, criterion, device, optimizer=None):
    """Run one full pass over *loader*.

    Trains (backprop + optimizer step) when *optimizer* is given,
    otherwise evaluates under ``torch.no_grad`` semantics.

    Returns:
        Tuple ``(avg_loss, accuracy)`` — mean loss per batch and the
        fraction of correctly classified samples.
    """
    training = optimizer is not None
    model.train(training)  # train(False) is equivalent to eval()

    total_loss = 0.0
    correct = 0
    seen = 0

    with torch.set_grad_enabled(training):
        for data, target in loader:
            data = data.to(device)
            # CrossEntropyLoss requires integer class indices; cast
            # defensively in case labels were stored as floats.
            target = target.to(device).long()

            output = model(data)
            loss = criterion(output, target)

            if training:
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

            total_loss += loss.item()
            _, predicted = torch.max(output, 1)
            seen += target.size(0)
            correct += (predicted == target).sum().item()

    return total_loss / len(loader), correct / seen


def train_model():
    """Train the attention LSTM classifier defined by the config module.

    Loads data per ``DATA_CONFIG`` (a pre-split dataset when
    ``test_data_path`` is set, otherwise an automatic split), trains with
    class-weighted cross-entropy and cosine-annealed Adam, plots the
    training history, evaluates on the test set, and saves the model plus
    a per-epoch CSV log to the paths in ``SAVE_CONFIG``.

    Returns:
        Tuple ``(model, scaler)``: the trained model and the fitted
        feature scaler (needed to preprocess future inputs).
    """
    # Set up the device, falling back to CPU when the configured CUDA
    # device is unavailable (otherwise .to(device) would raise).
    requested_device = TRAIN_CONFIG['device']
    if str(requested_device).startswith('cuda') and not torch.cuda.is_available():
        requested_device = 'cpu'
    device = torch.device(requested_device)
    print(f"使用设备: {device}")

    # Load and preprocess the data.
    print("正在加载和预处理数据...")

    if DATA_CONFIG.get('test_data_path'):
        # A pre-split train/test dataset was provided.
        X_train, X_test, y_train, y_test, scaler = load_preprocessed_dataset(
            DATA_CONFIG['data_path'],
            DATA_CONFIG['test_data_path'],
            DATA_CONFIG['feature_columns'],
            DATA_CONFIG['target_column']
        )
    else:
        # Single raw dataset: split it automatically.
        X_train, X_test, y_train, y_test, scaler = load_and_preprocess_data(
            DATA_CONFIG['data_path'],
            DATA_CONFIG['feature_columns'],
            DATA_CONFIG['target_column'],
            TRAIN_CONFIG['sequence_length'],
            TRAIN_CONFIG['train_split']
        )

    print(f"训练集大小: {X_train.shape}")
    print(f"测试集大小: {X_test.shape}")

    # Build data loaders (shuffle only the training set).
    train_dataset = TensorDataset(X_train, y_train)
    test_dataset = TensorDataset(X_test, y_test)

    train_loader = DataLoader(train_dataset, batch_size=TRAIN_CONFIG['batch_size'], shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=TRAIN_CONFIG['batch_size'], shuffle=False)

    # Create the attention-based model.
    print("正在创建模型...")
    model = create_model('attention', **MODEL_CONFIG)
    model.to(device)

    # Inverse-frequency class weights to counter label imbalance.
    class_weights = _compute_class_weights(y_train)

    # Weighted cross-entropy loss and Adam with optional weight decay.
    criterion = nn.CrossEntropyLoss(weight=class_weights.to(device))
    optimizer = optim.Adam(
        model.parameters(),
        lr=TRAIN_CONFIG['learning_rate'],
        weight_decay=TRAIN_CONFIG.get('weight_decay', 0.0)
    )

    # Cosine annealing down to 1% of the initial learning rate.
    scheduler = optim.lr_scheduler.CosineAnnealingLR(
        optimizer,
        T_max=TRAIN_CONFIG['num_epochs'],
        eta_min=TRAIN_CONFIG['learning_rate'] * 0.01
    )

    # Per-epoch history for plotting and the CSV log.
    train_losses = []
    val_losses = []
    train_accuracies = []
    val_accuracies = []

    print("开始训练...")
    start_time = time.time()

    for epoch in range(TRAIN_CONFIG['num_epochs']):
        # One training pass followed by one validation pass.
        train_loss, train_accuracy = _run_epoch(
            model, train_loader, criterion, device, optimizer)
        val_loss, val_accuracy = _run_epoch(
            model, test_loader, criterion, device)

        train_losses.append(train_loss)
        train_accuracies.append(train_accuracy)
        val_losses.append(val_loss)
        val_accuracies.append(val_accuracy)

        # Step the LR schedule once per epoch.
        scheduler.step()

        # Report progress every 10 epochs.
        if (epoch + 1) % 10 == 0:
            print(f'Epoch [{epoch+1}/{TRAIN_CONFIG["num_epochs"]}], '
                  f'训练损失: {train_losses[-1]:.4f}, 训练准确率: {train_accuracy:.4f}, '
                  f'验证损失: {val_losses[-1]:.4f}, 验证准确率: {val_accuracy:.4f}, '
                  f'学习率: {scheduler.get_last_lr()[0]:.6f}')

    training_time = time.time() - start_time
    print(f"训练完成! 总训练时间: {training_time:.2f}秒")

    # Plot loss/accuracy curves.
    plot_training_history(train_losses, val_losses, train_accuracies, val_accuracies)

    # Final evaluation on the held-out test set.
    print("\n正在评估模型...")
    evaluate_model(model, X_test, y_test, device)

    # Persist the trained model.
    save_model(model, SAVE_CONFIG['model_save_path'])

    # Persist the per-epoch training log as CSV.
    log_df = pd.DataFrame({
        'epoch': range(1, TRAIN_CONFIG['num_epochs'] + 1),
        'train_loss': train_losses,
        'val_loss': val_losses,
        'train_accuracy': train_accuracies,
        'val_accuracy': val_accuracies
    })

    os.makedirs(os.path.dirname(SAVE_CONFIG['log_save_path']), exist_ok=True)
    log_df.to_csv(SAVE_CONFIG['log_save_path'], index=False)
    print(f"训练日志已保存到: {SAVE_CONFIG['log_save_path']}")

    return model, scaler

if __name__ == "__main__":
    # Only kick off training when the configured data file is actually
    # present; otherwise print a hint about fixing DATA_CONFIG instead
    # of failing later inside the data loader.
    if os.path.exists(DATA_CONFIG['data_path']):
        trained_model, fitted_scaler = train_model()

        print("\n训练完成!")
        print("模型已保存到:", SAVE_CONFIG['model_save_path'])
        print("训练日志已保存到:", SAVE_CONFIG['log_save_path'])
        print("训练历史图已保存为: training_history.png")
        print("混淆矩阵图已保存为: confusion_matrix.png")
    else:
        print(f"警告: 数据文件 {DATA_CONFIG['data_path']} 不存在")
        print("请确保数据文件存在或修改DATA_CONFIG中的data_path")