# 1. 导入必要的库
import argparse
import os
from collections import deque

import torch
import torch.optim as optim
from tqdm import tqdm

from config import create_new_run_dir, get_latest_run_dir
from model import MOE_Model
from tools import (
    evaluate_model,
    get_data_loaders,
    get_training_config,
    update_training_history,
    save_checkpoints,
    print_epoch_results,
    finalize_training,
)

# 2. Module-level training state (shared by train_model / finalize_training)
run_dir = None  # output directory of the current run
train_losses = []  # per-epoch training loss history
train_accs = []  # per-epoch training accuracy history
val_accs = []  # per-epoch validation accuracy history


def resume_training(model, optimizer, device):
    """Restore model/optimizer state from the most recent run's checkpoint.

    Looks for ``best_model.pth`` inside the latest run directory. On success
    returns ``(next_epoch, best_accuracy)``; if no checkpoint exists or
    loading fails for any reason, returns ``(0, 0)`` so training starts
    from scratch (best-effort resume by design).
    """
    try:
        previous_run = get_latest_run_dir()
        if not previous_run:
            return 0, 0

        ckpt_file = os.path.join(previous_run, "best_model.pth")
        if not os.path.exists(ckpt_file):
            return 0, 0

        print(f"找到最近的训练记录: {os.path.basename(previous_run)}")
        state = torch.load(ckpt_file, map_location=device)
        model.load_state_dict(state["model_state_dict"])
        optimizer.load_state_dict(state["optimizer_state_dict"])
        return state["epoch"] + 1, state["accuracy"]
    except Exception as e:
        # Any failure (missing keys, corrupt file, ...) falls back to a
        # fresh start rather than aborting training.
        print(f"恢复训练状态失败: {e}")
        return 0, 0


def train_epoch(model, train_loader, optimizer, scheduler, criterion, device):
    """Run one training epoch over ``train_loader``.

    Performs forward/backward passes, steps the optimizer and the per-batch
    LR scheduler, and refreshes the progress bar with smoothed loss/accuracy
    every 20 batches.

    Returns:
        tuple[float, float]: (mean per-batch loss, epoch accuracy in percent).
    """
    model.train()
    total_loss = 0.0
    correct = 0
    total = 0

    # Sliding-window statistics over the most recent batches for the
    # progress bar. The previous implementation mixed *sample* counts with a
    # 50-item window, so with batch sizes > 50 the "running correct" counter
    # could go negative and the displayed accuracy was meaningless; it also
    # seeded the loss EMA at 0.0, biasing early readings low. Fixed-length
    # deques over whole batches avoid both problems.
    window = 50
    recent_losses = deque(maxlen=window)
    recent_correct = deque(maxlen=window)
    recent_counts = deque(maxlen=window)

    batch_pbar = tqdm(train_loader, desc="训练中", leave=True, ncols=80)

    for batch_idx, (images, labels) in enumerate(batch_pbar, 1):
        images = images.to(device)
        labels = labels.to(device)

        # Forward pass and optimization step.
        outputs = model(images)
        loss = criterion(outputs, labels)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step()  # per-batch schedule — assumes a batch-level scheduler, TODO confirm

        # Epoch-level statistics.
        batch_loss = loss.item()
        total_loss += batch_loss
        _, predicted = torch.max(outputs.data, 1)
        batch_correct = (predicted == labels).sum().item()
        batch_size = labels.size(0)
        total += batch_size
        correct += batch_correct

        # Window statistics (last `window` batches).
        recent_losses.append(batch_loss)
        recent_correct.append(batch_correct)
        recent_counts.append(batch_size)

        # Refresh the progress bar every 20 batches to limit overhead.
        if batch_idx % 20 == 0:
            current_lr = optimizer.param_groups[0]["lr"]
            batch_pbar.set_postfix(
                {
                    "loss": f"{sum(recent_losses) / len(recent_losses):.4f}",
                    "acc": f"{100.0 * sum(recent_correct) / sum(recent_counts):.2f}%",
                    "lr": f"{current_lr:.1e}",
                },
                refresh=True,
            )

    avg_loss = total_loss / len(train_loader)
    accuracy = 100 * correct / total
    return avg_loss, accuracy


def train_model(max_epochs=50, batch_size=128, num_experts=5, resume=True):
    """Train the mixture-of-experts model.

    Args:
        max_epochs: maximum number of epochs to run.
        batch_size: mini-batch size for the data loaders.
        num_experts: number of experts in the MOE model.
        resume: when True, try to continue from the latest saved checkpoint.

    Returns:
        The trained model (on the selected device).
    """
    # Device and output directory setup.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    global run_dir
    run_dir = create_new_run_dir()
    print(f"使用设备: {device}")
    print(f"训练输出目录: {run_dir}")

    # Reset module-level history so repeated calls within one process do
    # not accumulate metrics from earlier runs (these lists are globals).
    train_losses.clear()
    train_accs.clear()
    val_accs.clear()

    # Model and optimizer.
    model = MOE_Model(num_experts).to(device)
    optimizer = optim.AdamW(model.parameters(), lr=0.001, weight_decay=0.01)

    # Data loaders.
    train_loader, test_loader = get_data_loaders(batch_size)

    # Scheduler, loss function, and early-stopping policy.
    scheduler, criterion, early_stopping = get_training_config(
        optimizer, max_epochs, len(train_loader)
    )

    # NOTE(review): create_new_run_dir() runs before the resume lookup, so
    # get_latest_run_dir() inside resume_training may point at the freshly
    # created (empty) run directory and silently find no checkpoint —
    # verify against config.get_latest_run_dir's semantics.
    start_epoch, best_accuracy = (
        resume_training(model, optimizer, device) if resume else (0, 0)
    )

    # Main training loop.
    for epoch in range(start_epoch, max_epochs):
        train_loss, train_accuracy = train_epoch(
            model, train_loader, optimizer, scheduler, criterion, device
        )

        # Validate on the held-out set.
        val_loss, val_accuracy = evaluate_model(model, test_loader, criterion, device)

        # Persist the metric history for later plotting/analysis.
        update_training_history(
            train_loss, train_accuracy, val_accuracy, run_dir, 
            train_losses, train_accs, val_accs
        )

        # Stop when validation accuracy has plateaued; note this skips the
        # checkpoint/print for the triggering epoch (preserved behavior).
        if early_stopping(epoch, val_accuracy):
            print(f"\n早停触发，连续{early_stopping.patience}轮未提升")
            break

        save_checkpoints(model, optimizer, epoch, train_loss, train_accuracy, run_dir)

        print_epoch_results(
            epoch, train_loss, train_accuracy, val_loss, val_accuracy, optimizer
        )

    # Final evaluation and artifact export.
    finalize_training(
        model, optimizer, criterion, test_loader, device, run_dir,
        train_losses, train_accs, val_accs
    )

    return model


if __name__ == "__main__":
    # Command-line entry point: parse options and launch training.
    cli = argparse.ArgumentParser(description="训练手写数字识别模型")
    cli.add_argument("--resume", action="store_true", help="从上次训练断点继续训练")
    cli.add_argument("--epochs", type=int, default=50, help="训练轮数")
    cli.add_argument("--batch-size", type=int, default=128, help="批次大小")
    cli.add_argument("--num-experts", type=int, default=5, help="专家数量")
    opts = cli.parse_args()

    model = train_model(
        max_epochs=opts.epochs,
        batch_size=opts.batch_size,
        num_experts=opts.num_experts,
        resume=opts.resume,
    )
