"""
训练模块
"""

import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
import time
import os
from tqdm import tqdm
from config import Config
from model import get_model
from dataset import get_dataloaders
from utils import save_checkpoint, load_checkpoint

def train_one_epoch(model, train_loader, criterion, optimizer, epoch, cfg):
    """Run one full training pass over ``train_loader``.

    Puts the model in train mode, performs forward/backward/step per batch,
    and reports a live loss/accuracy readout on a tqdm progress bar.

    Returns:
        tuple: ``(mean_batch_loss, accuracy_percent)`` for the epoch.
    """
    model.train()
    loss_sum = 0.0
    n_correct = 0
    n_seen = 0

    progress = tqdm(train_loader, desc=f"Epoch {epoch+1}/{cfg.EPOCHS}", leave=False)
    for step, (inputs, targets) in enumerate(progress):
        inputs = inputs.to(cfg.DEVICE)
        targets = targets.to(cfg.DEVICE)

        # Forward pass
        outputs = model(inputs)
        loss = criterion(outputs, targets)

        # Backward pass and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Accumulate running statistics for the progress readout
        loss_sum += loss.item()
        preds = outputs.max(1)[1]
        n_seen += targets.size(0)
        n_correct += preds.eq(targets).sum().item()

        progress.set_postfix({
            'loss': loss_sum / (step + 1),
            'acc': 100. * n_correct / n_seen
        })

    return loss_sum / len(train_loader), 100. * n_correct / n_seen

def validate(model, test_loader, criterion, cfg):
    """Evaluate the model over ``test_loader`` without gradient tracking.

    Puts the model in eval mode and accumulates loss/accuracy, mirroring the
    statistics reported by the training loop.

    Returns:
        tuple: ``(mean_batch_loss, accuracy_percent)`` over the test set.
    """
    model.eval()
    loss_sum = 0.0
    n_correct = 0
    n_seen = 0

    with torch.no_grad():
        progress = tqdm(test_loader, desc="Validating", leave=False)
        for step, (inputs, targets) in enumerate(progress):
            inputs = inputs.to(cfg.DEVICE)
            targets = targets.to(cfg.DEVICE)

            outputs = model(inputs)
            loss = criterion(outputs, targets)

            loss_sum += loss.item()
            preds = outputs.max(1)[1]
            n_seen += targets.size(0)
            n_correct += preds.eq(targets).sum().item()

            progress.set_postfix({
                'loss': loss_sum / (step + 1),
                'acc': 100. * n_correct / n_seen
            })

    return loss_sum / len(test_loader), 100. * n_correct / n_seen

def train():
    """Full training driver.

    Seeds the RNGs, builds data/model/optimizer, runs every epoch with a
    validation pass, checkpoints after each epoch (flagging the best one by
    validation accuracy), and returns the per-epoch metric history.
    """
    conf = Config()

    # Seed RNGs before any data/model construction for reproducibility.
    torch.manual_seed(conf.SEED)
    if conf.DEVICE == 'cuda':
        torch.cuda.manual_seed_all(conf.SEED)

    # Make sure the output directories exist.
    os.makedirs(conf.SAVE_DIR, exist_ok=True)
    os.makedirs(conf.LOG_DIR, exist_ok=True)

    # Data and model
    train_loader, test_loader = get_dataloaders()
    model = get_model()

    # Loss, optimizer, and (optionally) a step-decay LR schedule.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.AdamW(model.parameters(), lr=conf.LEARNING_RATE,
                            weight_decay=conf.WEIGHT_DECAY)
    scheduler = StepLR(optimizer, step_size=15, gamma=0.1) if conf.LR_SCHEDULER else None

    best_val_acc = 0.0
    history = {'train_loss': [], 'train_acc': [], 'val_loss': [], 'val_acc': []}

    print("Starting training...")
    for epoch in range(conf.EPOCHS):
        tic = time.time()

        # One optimization pass, then an evaluation pass.
        tr_loss, tr_acc = train_one_epoch(model, train_loader, criterion,
                                          optimizer, epoch, conf)
        va_loss, va_acc = validate(model, test_loader, criterion, conf)

        # Advance the LR schedule once per epoch.
        if scheduler is not None:
            scheduler.step()

        # Record this epoch's metrics.
        for key, value in (('train_loss', tr_loss), ('train_acc', tr_acc),
                           ('val_loss', va_loss), ('val_acc', va_acc)):
            history[key].append(value)

        # Checkpoint every epoch; mark the best-so-far by validation accuracy.
        is_best = va_acc > best_val_acc
        best_val_acc = max(va_acc, best_val_acc)
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_acc': best_val_acc,
            'optimizer': optimizer.state_dict(),
            'history': history
        }, is_best, conf.SAVE_DIR)

        elapsed = time.time() - tic
        print(f"Epoch [{epoch+1}/{conf.EPOCHS}] - "
              f"Train Loss: {tr_loss:.4f}, Train Acc: {tr_acc:.2f}% - "
              f"Val Loss: {va_loss:.4f}, Val Acc: {va_acc:.2f}% - "
              f"Time: {elapsed:.2f}s")

    print(f"Training complete. Best validation accuracy: {best_val_acc:.2f}%")

    return history

if __name__ == "__main__":
    train()