import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset, WeightedRandomSampler
import pandas as pd
import numpy as np
import os
import time
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, f1_score

from config import MODEL_CONFIG, TRAIN_CONFIG, DATA_CONFIG, SAVE_CONFIG
from models.lstm_model import create_model
from utils import load_and_preprocess_data, load_preprocessed_dataset, plot_training_history, evaluate_model, save_model

def train_advanced_model():
    """Train the LSTM model with an advanced training strategy.

    Pipeline: load data (pre-split files or automatic split), build a
    class-balanced sampler and weighted loss, train with gradient clipping,
    ReduceLROnPlateau scheduling and early stopping, then reload the best
    checkpoint, evaluate it, and persist the model plus a CSV training log.

    Returns:
        tuple: (model, scaler) — the trained model with the best checkpoint
        loaded, and the fitted feature scaler.
    """
    # Select compute device from config
    device = torch.device(TRAIN_CONFIG['device'])
    print(f"使用设备: {device}")

    # Load and preprocess data
    print("正在加载和预处理数据...")

    if 'test_data_path' in DATA_CONFIG and DATA_CONFIG['test_data_path']:
        # Pre-split dataset: train and test come from separate files
        X_train, X_test, y_train, y_test, scaler = load_preprocessed_dataset(
            DATA_CONFIG['data_path'],
            DATA_CONFIG['test_data_path'],
            DATA_CONFIG['feature_columns'],
            DATA_CONFIG['target_column']
        )
    else:
        # Single raw file: split into train/test automatically
        X_train, X_test, y_train, y_test, scaler = load_and_preprocess_data(
            DATA_CONFIG['data_path'],
            DATA_CONFIG['feature_columns'],
            DATA_CONFIG['target_column'],
            TRAIN_CONFIG['sequence_length'],
            TRAIN_CONFIG['train_split']
        )

    print(f"训练集大小: {X_train.shape}")
    print(f"测试集大小: {X_test.shape}")

    # Class weights: balanced but tilted toward the minority class.
    # Exponents < 1 soften plain inverse-frequency weighting; the smaller
    # exponent on class 1 keeps the model from over-correcting.
    # NOTE(review): assumes a binary target (classes 0 and 1) — confirm.
    class_counts = torch.bincount(y_train.long())
    total_samples = len(y_train)

    class_weights = torch.tensor([
        (total_samples / class_counts[0]) ** 0.6,  # class-0 weight
        (total_samples / class_counts[1]) ** 0.4   # class-1 weight
    ])

    # Normalize weights so they sum to 1
    class_weights = class_weights / class_weights.sum()

    print(f"类别计数: {class_counts}")
    print(f"类别权重: {class_weights}")

    # Weighted sampler to counter class imbalance. Vectorized tensor
    # indexing replaces the original per-element Python loop (which built
    # a tensor out of 0-dim tensors and was O(n) Python-side).
    # NOTE(review): both the sampler AND the weighted loss correct for
    # imbalance, so the minority class is effectively boosted twice —
    # confirm this double correction is intended.
    sample_weights = class_weights[y_train.long()]
    sampler = WeightedRandomSampler(sample_weights, len(sample_weights), replacement=True)

    # Data loaders: sampler handles shuffling/balancing on the train side
    train_dataset = TensorDataset(X_train, y_train)
    test_dataset = TensorDataset(X_test, y_test)

    train_loader = DataLoader(train_dataset, batch_size=TRAIN_CONFIG['batch_size'], sampler=sampler)
    test_loader = DataLoader(test_dataset, batch_size=TRAIN_CONFIG['batch_size'], shuffle=False)

    # Build the 'basic' model variant to limit overfitting
    print("正在创建模型...")
    model = create_model('basic', **MODEL_CONFIG)
    model.to(device)

    # Loss (class-weighted) and optimizer
    criterion = nn.CrossEntropyLoss(weight=class_weights.to(device))
    optimizer = optim.Adam(
        model.parameters(),
        lr=TRAIN_CONFIG['learning_rate'],
        weight_decay=TRAIN_CONFIG.get('weight_decay', 0.0)
    )

    # LR scheduler driven by validation loss. The 'verbose' kwarg was
    # removed in PyTorch >= 2.4 and would raise a TypeError; the periodic
    # progress print below already reports the current learning rate.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', factor=0.7, patience=10, min_lr=1e-6
    )

    # Training history
    train_losses = []
    val_losses = []
    train_accuracies = []
    val_accuracies = []
    val_f1_scores = []

    # Early-stopping state
    best_val_loss = float('inf')
    patience = 30
    patience_counter = 0

    # Ensure the checkpoint directory exists up front — without this,
    # the first torch.save below crashes on a fresh checkout (the log
    # directory gets the same treatment near the end of this function).
    os.makedirs('models', exist_ok=True)

    print("开始训练...")
    start_time = time.time()

    for epoch in range(TRAIN_CONFIG['num_epochs']):
        # ---- Training phase ----
        model.train()
        train_loss = 0.0
        train_correct = 0
        train_total = 0

        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)

            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()

            # Gradient clipping to stabilize LSTM training
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            optimizer.step()

            train_loss += loss.item()
            _, predicted = torch.max(output.data, 1)
            train_total += target.size(0)
            train_correct += (predicted == target).sum().item()

        train_accuracy = train_correct / train_total
        train_losses.append(train_loss / len(train_loader))
        train_accuracies.append(train_accuracy)

        # ---- Validation phase ----
        model.eval()
        val_loss = 0.0
        val_correct = 0
        val_total = 0
        all_predictions = []
        all_targets = []

        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(device), target.to(device)
                output = model(data)
                loss = criterion(output, target)

                val_loss += loss.item()
                _, predicted = torch.max(output.data, 1)
                val_total += target.size(0)
                val_correct += (predicted == target).sum().item()

                all_predictions.extend(predicted.cpu().numpy())
                all_targets.extend(target.cpu().numpy())

        val_accuracy = val_correct / val_total
        val_loss_avg = val_loss / len(test_loader)
        val_losses.append(val_loss_avg)
        val_accuracies.append(val_accuracy)

        # Weighted F1 tracks class balance better than raw accuracy
        val_f1 = f1_score(all_targets, all_predictions, average='weighted')
        val_f1_scores.append(val_f1)

        # ReduceLROnPlateau steps on the validation loss
        scheduler.step(val_loss_avg)

        # Early-stopping bookkeeping: checkpoint on improvement
        if val_loss_avg < best_val_loss:
            best_val_loss = val_loss_avg
            patience_counter = 0
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'val_loss': best_val_loss,
                'val_accuracy': val_accuracy
            }, 'models/best_model.pth')
        else:
            patience_counter += 1

        # Progress report every 10 epochs
        if (epoch + 1) % 10 == 0:
            print(f'Epoch [{epoch+1}/{TRAIN_CONFIG["num_epochs"]}], '
                  f'训练损失: {train_losses[-1]:.4f}, 训练准确率: {train_accuracy:.4f}, '
                  f'验证损失: {val_loss_avg:.4f}, 验证准确率: {val_accuracy:.4f}, '
                  f'验证F1: {val_f1:.4f}, 学习率: {optimizer.param_groups[0]["lr"]:.6f}')

        # Early stop once validation loss has stalled for `patience` epochs
        if patience_counter >= patience:
            print(f"早停在第 {epoch+1} 轮")
            break

    end_time = time.time()
    training_time = end_time - start_time
    print(f"训练完成! 总训练时间: {training_time:.2f}秒")

    # Reload the best checkpoint; map_location keeps loading robust
    # regardless of the device the checkpoint was saved from
    checkpoint = torch.load('models/best_model.pth', map_location=device)
    model.load_state_dict(checkpoint['model_state_dict'])
    print(f"加载最佳模型（第{checkpoint['epoch']+1}轮），验证损失: {checkpoint['val_loss']:.4f}")

    # Plot training curves
    plot_training_history(train_losses, val_losses, train_accuracies, val_accuracies)

    # Final evaluation on the held-out set
    print("\n正在评估模型...")
    evaluate_model(model, X_test, y_test, device)

    # Persist the final model
    save_model(model, SAVE_CONFIG['model_save_path'])

    # Persist the per-epoch training log as CSV
    log_df = pd.DataFrame({
        'epoch': range(1, len(train_losses) + 1),
        'train_loss': train_losses,
        'val_loss': val_losses,
        'train_accuracy': train_accuracies,
        'val_accuracy': val_accuracies,
        'val_f1_score': val_f1_scores
    })

    os.makedirs(os.path.dirname(SAVE_CONFIG['log_save_path']), exist_ok=True)
    log_df.to_csv(SAVE_CONFIG['log_save_path'], index=False)
    print(f"训练日志已保存到: {SAVE_CONFIG['log_save_path']}")

    return model, scaler

if __name__ == "__main__":
    # Only launch training when the configured data file is present;
    # otherwise print a hint about fixing DATA_CONFIG instead of crashing.
    data_path = DATA_CONFIG['data_path']
    if os.path.exists(data_path):
        # Run the full training pipeline and report where artifacts landed
        model, scaler = train_advanced_model()

        print("\n训练完成!")
        print("模型已保存到:", SAVE_CONFIG['model_save_path'])
        print("训练日志已保存到:", SAVE_CONFIG['log_save_path'])
        print("训练历史图已保存为: training_history.png")
        print("混淆矩阵图已保存为: confusion_matrix.png")
    else:
        print(f"警告: 数据文件 {DATA_CONFIG['data_path']} 不存在")
        print("请确保数据文件存在或修改DATA_CONFIG中的data_path")