import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import argparse
from torch.optim.lr_scheduler import ReduceLROnPlateau, CosineAnnealingLR, OneCycleLR
from src.data.dataloader import get_har_dataloader
# Imports of deleted models were removed; only the models below remain available.
from src.models.attention_model import HARAttention
from src.models.transformer_model import HARTransformer
# Import the newly added models
from src.models.cnn_lstm_attention_model import HARCNNLSTMAttention
from src.models.cnn_resnet_model import HARResNet
from src.train import train_model, evaluate_model, plot_training_history

def get_model_specific_lr(model_name, base_lr=0.001):
    """Scale the base learning rate by a factor suited to the given model.

    Args:
        model_name: Name of the model architecture (e.g. 'attention',
            'transformer', 'resnet').
        base_lr: The base learning rate to scale. Defaults to 0.001.

    Returns:
        The learning rate adjusted for the model; unknown model names
        fall back to the base learning rate unchanged.
    """
    # Deeper or attention-heavy architectures train more stably with a
    # smaller learning rate, hence the factors below 1.0.
    for name, factor in (
        ('attention', 1.0),
        ('transformer', 0.5),        # Transformers usually need a smaller LR
        ('densenet_lstm', 0.8),
        ('cnn_lstm_attention', 0.7),
        ('resnet', 0.6),             # deep networks usually need a smaller LR
    ):
        if name == model_name:
            return base_lr * factor
    # Unrecognized model: keep the base learning rate.
    return base_lr * 1.0

def _build_model(model_name, input_size, num_classes):
    """Instantiate the HAR model named by `model_name`.

    Args:
        model_name: One of the supported model identifiers.
        input_size: Feature dimension of one input sample.
        num_classes: Number of activity classes for the output head.

    Returns:
        A freshly constructed nn.Module.

    Raises:
        ValueError: If `model_name` has no available implementation.
    """
    if model_name == 'attention':
        return HARAttention(input_size=input_size, num_classes=num_classes)
    if model_name == 'transformer':
        return HARTransformer(input_size=input_size, num_classes=num_classes)
    if model_name == 'cnn_lstm_attention':
        return HARCNNLSTMAttention(input_size=input_size, num_classes=num_classes)
    if model_name == 'resnet':
        return HARResNet(input_size=input_size, num_classes=num_classes)
    # Bug fix: previously an unmatched name (e.g. 'densenet_lstm', whose model
    # class was deleted) left `model` undefined and crashed later with a
    # NameError. Fail fast with a clear message instead.
    raise ValueError(f"No implementation available for model '{model_name}'")


def _build_scheduler(scheduler_name, optimizer, base_lr, epochs, steps_per_epoch):
    """Create the learning-rate scheduler selected on the command line.

    Args:
        scheduler_name: 'none', 'plateau', 'cosine' or 'onecycle'.
        optimizer: The optimizer whose learning rate is scheduled.
        base_lr: The model-specific learning rate already applied.
        epochs: Total number of training epochs.
        steps_per_epoch: Number of optimizer steps per epoch (for OneCycleLR).

    Returns:
        A torch LR scheduler instance, or None when scheduler_name is 'none'.
    """
    if scheduler_name == 'plateau':
        # NOTE(review): `verbose=True` is deprecated in newer PyTorch releases
        # and removed in the latest ones — confirm the installed torch version
        # still accepts it.
        scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=5, verbose=True)
        print("使用ReduceLROnPlateau学习率调度器")
        return scheduler
    if scheduler_name == 'cosine':
        scheduler = CosineAnnealingLR(optimizer, T_max=epochs, eta_min=base_lr / 10)
        print("使用CosineAnnealingLR学习率调度器")
        return scheduler
    if scheduler_name == 'onecycle':
        scheduler = OneCycleLR(optimizer, max_lr=base_lr * 3, steps_per_epoch=steps_per_epoch, epochs=epochs)
        print("使用OneCycleLR学习率调度器")
        return scheduler
    return None


def main():
    """Entry point: parse CLI args, load the HAR dataset, train the selected
    model, then evaluate it on the test split and save results to disk."""
    # Parse command-line arguments.
    # Bug fix: 'densenet_lstm' was removed from `choices` — its model class was
    # deleted (see imports) and selecting it crashed with a NameError because
    # no branch ever assigned `model`.
    parser = argparse.ArgumentParser(description='HAR模型训练和评估')
    parser.add_argument('--model', type=str, default='attention',
                        choices=['attention', 'transformer',
                                 'cnn_lstm_attention', 'resnet'],
                        help='选择要使用的模型')
    parser.add_argument('--batch_size', type=int, default=64, help='批次大小')
    parser.add_argument('--epochs', type=int, default=50, help='训练轮数')
    parser.add_argument('--lr', type=float, default=0.001, help='基础学习率')
    parser.add_argument('--device', type=str, default='cuda' if torch.cuda.is_available() else 'cpu',
                        help='训练设备')
    parser.add_argument('--scheduler', type=str, default='plateau',
                        choices=['none', 'plateau', 'cosine', 'onecycle'],
                        help='学习率调度器类型')
    args = parser.parse_args()

    # Dataset lives next to this script under data/har_dataset.
    data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'har_dataset')

    # Data loaders for the train and test splits.
    train_loader = get_har_dataloader(data_dir, batch_size=args.batch_size, split='train', shuffle=True)
    test_loader = get_har_dataloader(data_dir, batch_size=args.batch_size, split='test', shuffle=False)

    print(f"训练集大小: {len(train_loader.dataset)}")
    print(f"测试集大小: {len(test_loader.dataset)}")

    # Inspect the first batch to log shapes and derive model dimensions.
    # Robustness: next(iter(...)) fails with an explicit error on an empty
    # loader instead of silently leaving `features` undefined as the old
    # for/break loop did.
    first_batch = next(iter(train_loader))
    features = first_batch['features']
    labels = first_batch['label']
    subjects = first_batch['subject']

    print(f"特征形状: {features.shape}")
    print(f"标签形状: {labels.shape}")
    print(f"受试者形状: {subjects.shape}")

    # Log the label distribution of the inspected batch.
    unique_labels, counts = np.unique(labels.numpy(), return_counts=True)
    print("标签分布:")
    for label, count in zip(unique_labels, counts):
        print(f"  活动 {label}: {count} 样本")

    # Model dimensions inferred from the first batch.
    # NOTE(review): num_classes counts the distinct labels in ONE batch; a rare
    # class absent from the first batch would shrink the output head — confirm
    # every class is guaranteed to appear in the first training batch.
    input_size = features.shape[1]
    num_classes = len(unique_labels)

    model = _build_model(args.model, input_size, num_classes)
    print(f"使用模型: {args.model}")
    print(model)

    # Per-model learning rate scaled from the CLI base learning rate.
    model_lr = get_model_specific_lr(args.model, args.lr)
    print(f"使用学习率: {model_lr}")

    # Loss and optimizer.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=model_lr)

    # Optional learning-rate scheduler ('none' yields None).
    scheduler = _build_scheduler(args.scheduler, optimizer, model_lr,
                                 args.epochs, len(train_loader))

    # Directory where checkpoints and plots are written.
    model_save_path = os.path.join('models', args.model)
    os.makedirs(model_save_path, exist_ok=True)

    # Train; the test split doubles as the validation set.
    history = train_model(
        model=model,
        train_loader=train_loader,
        val_loader=test_loader,
        criterion=criterion,
        optimizer=optimizer,
        num_epochs=args.epochs,
        device=args.device,
        model_save_path=model_save_path,
        scheduler=scheduler,
        scheduler_type=args.scheduler
    )

    # Plot training curves.
    plot_training_history(history, save_path=os.path.join(model_save_path, 'training_history.png'))

    # Reload the best checkpoint saved by train_model before final evaluation.
    model.load_state_dict(torch.load(os.path.join(model_save_path, 'best_model.pth')))

    # Map integer activity ids to human-readable names.
    # Assumed file format: "<id> <label>" per line — TODO confirm.
    # encoding specified explicitly so non-ASCII labels parse the same on
    # every platform (previously relied on the locale default).
    activity_labels = {}
    with open(os.path.join(data_dir, 'activity_labels.txt'), 'r', encoding='utf-8') as f:
        for line in f:
            idx, label = line.strip().split(' ', 1)
            activity_labels[int(idx)] = label

    # Final evaluation on the test split.
    evaluate_model(
        model=model,
        test_loader=test_loader,
        criterion=criterion,
        device=args.device,
        activity_labels=activity_labels
    )

    print(f"模型训练和评估完成！结果保存在 {model_save_path} 目录中。")

if __name__ == "__main__":
    main()