import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, classification_report
import seaborn as sns
from torch.optim.lr_scheduler import ReduceLROnPlateau, CosineAnnealingLR, OneCycleLR

from src.data.dataloader import get_har_dataloader
# Imports for deleted models have been removed
from src.models.attention_model import HARAttention
from src.models.transformer_model import HARTransformer
# Import the new models
from src.models.cnn_lstm_attention_model import HARCNNLSTMAttention
from src.models.cnn_resnet_model import HARResNet

def train_model(model, train_loader, val_loader, criterion, optimizer, num_epochs=50, device='cuda', model_save_path='models', scheduler=None, scheduler_type=None):
    """
    Train a model, tracking per-epoch loss/accuracy and the learning rate.

    Parameters:
        model: the model to train
        train_loader: training data loader; each batch is a dict with
            'features' and 'label' tensors
        val_loader: validation data loader (same batch format)
        criterion: loss function
        optimizer: optimizer
        num_epochs: number of training epochs
        device: device to train on ('cuda' or 'cpu')
        model_save_path: directory where 'best_model.pth' / 'last_model.pth'
            checkpoints are written
        scheduler: optional learning-rate scheduler
        scheduler_type: one of 'plateau', 'cosine', 'onecycle'; controls
            when and how scheduler.step() is called

    Returns:
        dict with per-epoch lists: 'train_loss', 'val_loss', 'train_acc',
        'val_acc' and 'learning_rates'.
    """
    # Make sure the checkpoint directory exists
    os.makedirs(model_save_path, exist_ok=True)

    # Move the model to the target device
    model = model.to(device)

    # Initialize the training history
    history = {
        'train_loss': [],
        'val_loss': [],
        'train_acc': [],
        'val_acc': [],
        'learning_rates': []  # learning rate recorded at the start of each epoch
    }

    # Best validation accuracy seen so far
    best_val_acc = 0.0

    # Training loop
    for epoch in range(num_epochs):
        # ---- Training phase ----
        model.train()
        train_loss = 0.0
        train_correct = 0
        train_total = 0

        # tqdm progress bar for the training phase
        train_pbar = tqdm(train_loader, desc=f'Epoch {epoch+1}/{num_epochs} [Train]')

        # Record the learning rate at the start of the epoch
        current_lr = optimizer.param_groups[0]['lr']
        history['learning_rates'].append(current_lr)

        for batch in train_pbar:
            # Unpack features and labels
            features = batch['features'].to(device)
            labels = batch['label'].to(device)

            # Zero the gradients
            optimizer.zero_grad()

            # Forward pass
            outputs = model(features)
            loss = criterion(outputs, labels)

            # Backward pass and optimizer step
            loss.backward()
            optimizer.step()

            # OneCycleLR is stepped after every batch
            if scheduler is not None and scheduler_type == 'onecycle':
                scheduler.step()

            # Accumulate statistics (loss weighted by batch size)
            train_loss += loss.item() * features.size(0)
            _, predicted = torch.max(outputs, 1)
            train_total += labels.size(0)
            train_correct += (predicted == labels).sum().item()

            # Update the progress bar; read the LR live from the optimizer so
            # per-batch schedulers (OneCycleLR) show the actual current value
            train_pbar.set_postfix({
                'loss': loss.item(),
                'acc': train_correct / train_total,
                'lr': optimizer.param_groups[0]['lr'],
            })

        # Epoch-level training loss and accuracy
        train_loss = train_loss / train_total
        train_acc = train_correct / train_total

        # ---- Validation phase ----
        model.eval()
        val_loss = 0.0
        val_correct = 0
        val_total = 0

        # No gradients needed for evaluation
        with torch.no_grad():
            val_pbar = tqdm(val_loader, desc=f'Epoch {epoch+1}/{num_epochs} [Val]')

            for batch in val_pbar:
                # Unpack features and labels
                features = batch['features'].to(device)
                labels = batch['label'].to(device)

                # Forward pass
                outputs = model(features)
                loss = criterion(outputs, labels)

                # Accumulate statistics
                val_loss += loss.item() * features.size(0)
                _, predicted = torch.max(outputs, 1)
                val_total += labels.size(0)
                val_correct += (predicted == labels).sum().item()

                # Update the progress bar
                val_pbar.set_postfix({'loss': loss.item(), 'acc': val_correct / val_total})

        # Epoch-level validation loss and accuracy
        val_loss = val_loss / val_total
        val_acc = val_correct / val_total

        # Update the training history
        history['train_loss'].append(train_loss)
        history['val_loss'].append(val_loss)
        history['train_acc'].append(train_acc)
        history['val_acc'].append(val_acc)

        # Print epoch summary
        print(f'Epoch {epoch+1}/{num_epochs}:')
        print(f'  Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.4f}')
        print(f'  Val Loss: {val_loss:.4f}, Val Acc: {val_acc:.4f}')
        print(f'  Learning Rate: {current_lr:.6f}')

        # Epoch-level scheduler updates (plateau uses the validation loss)
        if scheduler is not None:
            if scheduler_type == 'plateau':
                scheduler.step(val_loss)
            elif scheduler_type == 'cosine':
                scheduler.step()

        # Save the best model (by validation accuracy)
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            torch.save(model.state_dict(), os.path.join(model_save_path, 'best_model.pth'))
            print(f'  Saved best model with val_acc: {val_acc:.4f}')

        # Always save the most recent model
        torch.save(model.state_dict(), os.path.join(model_save_path, 'last_model.pth'))

    return history

def evaluate_model(model, test_loader, criterion, device='cuda', activity_labels=None, cm_save_path='confusion_matrix.png'):
    """
    Evaluate a model on a test set and save a confusion-matrix plot.

    Parameters:
        model: the model to evaluate
        test_loader: test data loader; each batch is a dict with
            'features' and 'label' tensors
        criterion: loss function
        device: device to evaluate on ('cuda' or 'cpu')
        activity_labels: optional mapping of class index -> label name;
            assumed to be keyed 1..N while predictions are 0-based
            (NOTE(review): confirm this 1-based keying against the dataset)
        cm_save_path: file path for the confusion-matrix figure
            (default matches the previous hard-coded 'confusion_matrix.png')

    Returns:
        dict with 'test_loss', 'test_acc', 'confusion_matrix',
        'predictions' and 'true_labels'.
    """
    # Move the model to the target device
    model = model.to(device)

    # Evaluation mode
    model.eval()

    # Accumulators
    test_loss = 0.0
    test_correct = 0
    test_total = 0
    all_preds = []
    all_labels = []

    # No gradients needed for evaluation
    with torch.no_grad():
        for batch in tqdm(test_loader, desc='Evaluating'):
            # Unpack features and labels
            features = batch['features'].to(device)
            labels = batch['label'].to(device)

            # Forward pass
            outputs = model(features)
            loss = criterion(outputs, labels)

            # Accumulate statistics (loss weighted by batch size)
            test_loss += loss.item() * features.size(0)
            _, predicted = torch.max(outputs, 1)
            test_total += labels.size(0)
            test_correct += (predicted == labels).sum().item()

            # Collect predictions and ground truth for the report
            all_preds.extend(predicted.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())

    # Overall test loss and accuracy
    test_loss = test_loss / test_total
    test_acc = test_correct / test_total

    # Print the headline results
    print(f'Test Loss: {test_loss:.4f}, Test Acc: {test_acc:.4f}')

    # Confusion matrix
    cm = confusion_matrix(all_labels, all_preds)

    # Build the human-readable label list once (shared by the report and plot)
    target_names = None
    if activity_labels is not None:
        target_names = [activity_labels[i + 1] for i in range(len(activity_labels))]

    # Classification report
    if target_names is not None:
        print(classification_report(all_labels, all_preds, target_names=target_names))
    else:
        print(classification_report(all_labels, all_preds))

    # Plot and save the confusion matrix
    plt.figure(figsize=(10, 8))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
    plt.xlabel('Predicted')
    plt.ylabel('True')
    if target_names is not None:
        plt.xticks(np.arange(len(target_names)) + 0.5, target_names, rotation=45)
        plt.yticks(np.arange(len(target_names)) + 0.5, target_names, rotation=45)
    plt.title('Confusion Matrix')
    plt.tight_layout()
    plt.savefig(cm_save_path)
    plt.close()

    return {
        'test_loss': test_loss,
        'test_acc': test_acc,
        'confusion_matrix': cm,
        'predictions': all_preds,
        'true_labels': all_labels
    }

def plot_training_history(history, save_path='training_history.png'):
    """
    Plot the training curves (loss, accuracy, and learning rate if recorded)
    in a 2x2 grid and save the figure to disk.

    Parameters:
        history: dict of per-epoch lists ('train_loss', 'val_loss',
            'train_acc', 'val_acc', optionally 'learning_rates')
        save_path: output file path for the figure
    """
    plt.figure(figsize=(15, 10))

    # Data-driven description of the two paired train/val panels:
    # (subplot position, [(history key, legend label), ...], y-label, title)
    panels = [
        (1, [('train_loss', 'Train Loss'), ('val_loss', 'Val Loss')],
         'Loss', 'Training and Validation Loss'),
        (2, [('train_acc', 'Train Acc'), ('val_acc', 'Val Acc')],
         'Accuracy', 'Training and Validation Accuracy'),
    ]
    for position, series, y_label, title in panels:
        plt.subplot(2, 2, position)
        for key, legend_label in series:
            plt.plot(history[key], label=legend_label)
        plt.xlabel('Epoch')
        plt.ylabel(y_label)
        plt.title(title)
        plt.legend()

    # Learning-rate schedule, only when it was recorded
    if 'learning_rates' in history:
        plt.subplot(2, 2, 3)
        plt.plot(history['learning_rates'])
        plt.xlabel('Epoch')
        plt.ylabel('Learning Rate')
        plt.title('Learning Rate Schedule')

    plt.tight_layout()
    plt.savefig(save_path)
    plt.close()