import argparse
import torch
from torch.utils.data import DataLoader
from src.models.struct_encoder import StructEncoder
from src.data.dataset import FontDataset
from src.training.losses import PathLoss
from src.configs import base_config
import os
from datetime import datetime

def parse_args():
    """Parse command-line options for a training run."""
    p = argparse.ArgumentParser()
    # The batch-size default comes from the shared base configuration.
    default_bs = base_config['data']['batch_size']
    p.add_argument('--batch_size', type=int, default=default_bs)
    p.add_argument('--lr', type=float, default=2e-4)
    p.add_argument('--epochs', type=int, default=50)
    p.add_argument(
        '--device',
        choices=['auto', 'cuda', 'cpu'],
        default='auto',
        help="训练设备 (auto:自动选择, cuda:强制GPU, cpu:强制CPU)",
    )
    return p.parse_args()

def _to_inputs(batch, device):
    """Convert a raw dataloader batch into the model's input dict on *device*.

    The image tensor is cast to float to match the encoder's expected dtype.
    (Assumes the collated batch exposes 'tokens' and 'image' tensors, as the
    training loop below demonstrates.)
    """
    return {
        'tokens': batch['tokens'].to(device),
        'image': batch['image'].float().to(device),
    }


def train(args):
    """Train a StructEncoder on FontDataset with validation, LR scheduling,
    early stopping, and per-epoch checkpointing.

    Args:
        args: parsed CLI namespace with batch_size, lr, epochs, device.

    Side effects:
        Writes model weights, checkpoints and feature dumps under a
        timestamped directory below outputs/struct_encoder/.
    """
    # 80/20 train/validation split of the full dataset.
    dataset = FontDataset()
    train_size = int(0.8 * len(dataset))
    val_size = len(dataset) - train_size
    train_dataset, val_dataset = torch.utils.data.random_split(dataset, [train_size, val_size])

    # Custom collate_fn handles the dataset's batch assembly.
    train_loader = DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        collate_fn=FontDataset.collate_fn,
    )
    val_loader = DataLoader(
        val_dataset,
        batch_size=args.batch_size,
        collate_fn=FontDataset.collate_fn,
    )

    # Device selection: 'auto' prefers CUDA when available.
    if args.device == 'auto':
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    else:
        device = torch.device(args.device)

    model = StructEncoder(
        vocab_size=100,
        d_model=256,  # reduced model size
        dropout=0.3,
        n_layers=3,
    ).to(device)

    optimizer = torch.optim.AdamW(
        model.parameters(),
        lr=args.lr,
        weight_decay=1e-4,
    )

    # BUG FIX: the original created two ReduceLROnPlateau schedulers; the
    # second silently replaced the first. Keep the one that was actually in
    # effect (factor=0.5, patience=2).
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', factor=0.5, patience=2, verbose=True
    )
    criterion = PathLoss()

    # Early-stopping state.
    best_val_loss = float('inf')
    patience = 3
    no_improve_epochs = 0

    # Timestamped output directory for checkpoints and features.
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    output_dir = f"outputs/struct_encoder/{timestamp}"
    os.makedirs(output_dir, exist_ok=True)

    inputs = None  # last training batch, reused for periodic feature dumps
    for epoch in range(args.epochs):
        # ---- training phase ----
        model.train()
        epoch_loss = 0.0
        for i, batch in enumerate(train_loader):
            inputs = _to_inputs(batch, device)

            outputs = model(inputs)
            losses = criterion(outputs, inputs)

            optimizer.zero_grad()
            losses['total'].backward()
            # BUG FIX: gradient clipping must run after backward() and
            # before step() on every iteration; the original called it once
            # before training, where no gradients existed, so it had no effect.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()

            epoch_loss += losses['total'].item()

            # Detailed per-batch logging every 10 batches.
            if i % 10 == 0:
                print(f"Epoch {epoch+1}/{args.epochs} | Batch {i}/{len(train_loader)} | "
                      f"Loss: {losses['total'].item():.4f} | "
                      f"Seq: {losses['seq'].item():.4f} | "
                      f"Img: {losses['img'].item():.4f} | "
                      f"LR: {optimizer.param_groups[0]['lr']:.2e}")

        # ---- validation phase ----
        model.eval()
        val_metrics = {'loss': 0.0, 'seq_acc': 0.0}
        with torch.no_grad():
            for batch in val_loader:
                # BUG FIX: validation batches were fed to the model raw
                # (CPU tensors, un-cast image), inconsistent with the training
                # path; mirror the training-side preprocessing so GPU runs
                # don't crash.
                val_inputs = _to_inputs(batch, device)
                outputs = model(val_inputs)
                losses = criterion(outputs, val_inputs)
                val_metrics['loss'] += losses['total'].item()

                # Token-level sequence prediction accuracy.
                preds = outputs['seq'].argmax(dim=-1)
                val_metrics['seq_acc'] += (preds == val_inputs['tokens']).float().mean().item()

        # Epoch-level summary log.
        avg_val_loss = val_metrics['loss'] / len(val_loader)
        avg_val_acc = val_metrics['seq_acc'] / len(val_loader)
        print(f"Epoch {epoch+1} Summary | "
              f"Train Loss: {epoch_loss/len(train_loader):.4f} | "
              f"Val Loss: {avg_val_loss:.4f} | "
              f"Val Acc: {avg_val_acc:.4f}")

        # Save model weights and a full checkpoint every epoch.
        torch.save(model.state_dict(), f"{output_dir}/model_epoch{epoch+1}.pt")
        torch.save({
            'epoch': epoch+1,
            'model_state': model.state_dict(),
            'optimizer_state': optimizer.state_dict(),
            'train_loss': epoch_loss/len(train_loader),
            'val_loss': avg_val_loss,
            'val_acc': avg_val_acc
        }, f"{output_dir}/checkpoint_epoch{epoch+1}.pt")

        # Step the scheduler on validation loss.
        scheduler.step(avg_val_loss)

        # Early stopping: track the best validation loss seen so far.
        if avg_val_loss < best_val_loss:
            best_val_loss = avg_val_loss
            no_improve_epochs = 0
            torch.save(model.state_dict(), f"{output_dir}/best_model.pt")
        else:
            no_improve_epochs += 1
            if no_improve_epochs >= patience:
                # BUG FIX: log the 1-indexed epoch, consistent with the
                # other log lines above.
                print(f"Early stopping at epoch {epoch+1}")
                break

        # Periodically dump sample features from the last training batch.
        # (The per-epoch save above already wrote this epoch's model weights,
        # so the redundant model save the original did here was dropped.)
        if ((epoch+1) % 10 == 0 or epoch == args.epochs-1) and inputs is not None:
            # BUG FIX: run the sample forward pass under no_grad — this is a
            # pure feature dump and must not build an autograd graph.
            with torch.no_grad():
                sample_features = model(inputs)
            feature_path = f"{output_dir}/features_epoch{epoch+1}.pt"
            torch.save({
                'seq_features': sample_features['seq'].detach().cpu(),
                'img_features': sample_features['img'].detach().cpu(),
                'tokens': inputs['tokens'].detach().cpu(),
                'images': inputs['image'].detach().cpu()
            }, feature_path)

def main():
    """Entry point: parse CLI arguments and launch training."""
    train(parse_args())


if __name__ == "__main__":
    main()