#!/usr/bin/env python
"""
MNIST训练脚本 - 最终稳定版
"""

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
from pathlib import Path
import sys

# Add the project root (the parent of this script's directory) to sys.path
# so the local `core`, `data`, `models`, and `utils` packages resolve when
# this file is run directly as a script.
sys.path.append(str(Path(__file__).parent.parent))

from core.environment import Environment
from data.mnist_dataset import MNISTDataModule
from models.simple_cnn import SimpleCNN
from utils.logger import setup_logger


def main():
    """Train a SimpleCNN on MNIST and checkpoint the best model by validation loss.

    Raises:
        Exception: any failure during setup or training is logged (when the
            logger is available) and re-raised.
    """
    # Pre-bind so the except clause can safely test it: if Environment() or
    # setup_logger() raises, `logger` would otherwise be an unbound name and
    # the resulting NameError would mask the original exception.
    logger = None
    try:
        # Run configuration / hyperparameters.
        env = Environment({
            'batch_size': 128,
            'epochs': 10,
            'lr': 0.001,
            'weight_decay': 1e-5
        })

        # Logging setup.
        logger = setup_logger(
            name="mnist_train",
            log_dir=env.log_dir
        )

        # Echo the full configuration into the log for reproducibility.
        logger.info("=" * 50)
        logger.info("Training Configuration:")
        for k, v in env.to_dict().items():
            logger.info(f"{k:>15}: {v}")
        logger.info("=" * 50)

        # Data preparation (Lightning-style DataModule interface).
        datamodule = MNISTDataModule(batch_size=env.batch_size)
        datamodule.prepare_data()
        datamodule.setup()
        train_loader = datamodule.train_dataloader()
        val_loader = datamodule.val_dataloader()

        # Model, optimizer, and loss.
        model = SimpleCNN().to(env.device)
        optimizer = optim.Adam(
            model.parameters(),
            lr=env.lr,
            weight_decay=env.weight_decay
        )
        criterion = nn.CrossEntropyLoss()

        # Training loop.
        best_val_loss = float('inf')
        for epoch in range(1, env.epochs + 1):
            # --- Training phase ---
            model.train()
            train_loss = 0.0
            with tqdm(train_loader, desc=f'Epoch {epoch}/{env.epochs}') as pbar:
                for data, target in pbar:
                    data, target = data.to(env.device), target.to(env.device)

                    optimizer.zero_grad()
                    output = model(data)
                    loss = criterion(output, target)
                    loss.backward()
                    optimizer.step()

                    train_loss += loss.item()
                    pbar.set_postfix({'loss': loss.item()})

            # --- Validation phase ---
            val_loss = evaluate(model, val_loader, criterion, env.device)
            logger.log_metrics({
                'train_loss': train_loss / len(train_loader),
                'val_loss': val_loss
            }, epoch)

            # Checkpoint whenever validation loss improves.
            if val_loss < best_val_loss:
                best_val_loss = val_loss
                torch.save(
                    model.state_dict(),
                    env.checkpoint_dir / 'best_model.pth'
                )
                logger.info(f"New best model saved at epoch {epoch}")

    except Exception as e:
        # Logger may still be None if setup itself failed; re-raise either
        # way so the traceback reaches the caller.
        if logger is not None:
            logger.error(f"Training failed: {str(e)}")
        raise


def evaluate(model, dataloader, criterion, device):
    """Compute the mean per-batch loss of ``model`` over ``dataloader``.

    Args:
        model: network to evaluate; switched to eval mode for the pass.
        dataloader: iterable of ``(data, target)`` batches with a ``len()``.
        criterion: loss callable applied as ``criterion(output, target)``.
        device: device each batch is moved onto before the forward pass.

    Returns:
        float: accumulated loss divided by ``len(dataloader)``.
    """
    model.eval()
    total_loss = 0.0
    # Gradients are never needed here; disabling them saves memory and time.
    with torch.no_grad():
        for inputs, labels in dataloader:
            inputs = inputs.to(device)
            labels = labels.to(device)
            total_loss += criterion(model(inputs), labels).item()
    return total_loss / len(dataloader)


if __name__ == "__main__":
    main()