import os
import torch
import argparse
import torch.optim as optim
from tqdm import tqdm
from data_preprocess import get_dataloader
from simclr import SimCLR, simclr_loss  # import from the local simclr module

def _train_one_epoch(model, loader, optimizer, device, temperature, epoch):
    """Run one training epoch and return the summed, sample-weighted loss.

    Each batch provides a list of augmented views under the 'images' key;
    all views are concatenated into one forward pass.
    """
    model.train()
    running_loss = 0.0
    for batch in tqdm(loader, desc=f'Epoch {epoch+1}'):
        images = batch['images']  # list of view tensors — concatenated below
        labels = batch['label'].to(device)  # [B]

        # Stack all views into a single batch: [B * num_views, C, H, W]
        images = torch.cat(images, dim=0).to(device)

        # Forward pass: model returns (features, projections); only the
        # projection-head output feeds the contrastive loss.
        _, projections = model(images)  # [B * num_views, projection_dim]
        loss = simclr_loss(projections, labels, temperature=temperature)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # NOTE(review): weighting by images.shape[0] counts each sample once
        # per view, so dividing by len(dataset) in the caller scales the mean
        # by num_views. Confirm this is the intended normalization.
        running_loss += loss.item() * images.shape[0]
    return running_loss


def _validate(model, loader, device, temperature):
    """Return the summed validation loss (single view per sample)."""
    model.eval()
    running_loss = 0.0
    with torch.no_grad():
        for batch in loader:
            images = batch['image'].to(device)  # [B, C, H, W]
            labels = batch['label'].to(device)
            _, projections = model(images)
            loss = simclr_loss(projections, labels, temperature=temperature)
            running_loss += loss.item() * images.shape[0]
    return running_loss


def train(args):
    """Train a SimCLR model and checkpoint the best backbone by val loss.

    Args:
        args: parsed CLI namespace providing data_dir, save_dir,
            projection_dim, batch_size, lr, weight_decay, temperature,
            and epochs. args.save_dir must already exist.

    Side effects:
        Writes 'best_backbone.pth' (backbone weights only) into
        args.save_dir whenever the validation loss improves, and prints
        per-epoch train/val losses.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Data loaders for the contrastive (multi-view) train split and the
    # single-view validation split.
    train_loader = get_dataloader(
        root_dir=args.data_dir,
        batch_size=args.batch_size,
        split='train'
    )
    val_loader = get_dataloader(
        root_dir=args.data_dir,
        batch_size=args.batch_size,
        split='val'
    )

    # Model, optimizer, and cosine LR schedule over the full run.
    model = SimCLR(projection_dim=args.projection_dim).to(device)
    optimizer = optim.AdamW(
        model.parameters(),
        lr=args.lr,
        weight_decay=args.weight_decay
    )
    scheduler = optim.lr_scheduler.CosineAnnealingLR(
        optimizer,
        T_max=args.epochs
    )

    best_loss = float('inf')
    for epoch in range(args.epochs):
        train_loss = _train_one_epoch(
            model, train_loader, optimizer, device, args.temperature, epoch
        ) / len(train_loader.dataset)
        val_loss = _validate(
            model, val_loader, device, args.temperature
        ) / len(val_loader.dataset)

        print(f'Train Loss: {train_loss:.4f}, Val Loss: {val_loss:.4f}')

        # Keep only the backbone weights of the best model — the projection
        # head is discarded for downstream use.
        if val_loss < best_loss:
            best_loss = val_loss
            torch.save(model.backbone.state_dict(), os.path.join(args.save_dir, 'best_backbone.pth'))

        scheduler.step()

if __name__ == '__main__':
    # CLI options declared as a spec table: (flag, add_argument kwargs).
    _ARG_SPECS = [
        ('--data_dir', dict(type=str, required=True, help='数据集根目录')),
        ('--save_dir', dict(type=str, default='checkpoints', help='模型保存目录')),
        ('--projection_dim', dict(type=int, default=128, help='投影头输出维度')),
        ('--batch_size', dict(type=int, default=32, help='批次大小')),
        ('--lr', dict(type=float, default=1e-4, help='学习率')),
        ('--weight_decay', dict(type=float, default=1e-5, help='权重衰减')),
        ('--temperature', dict(type=float, default=0.07, help='温度参数')),
        ('--epochs', dict(type=int, default=100, help='训练轮次')),
    ]

    cli = argparse.ArgumentParser()
    for flag, spec in _ARG_SPECS:
        cli.add_argument(flag, **spec)
    opts = cli.parse_args()

    # Ensure the checkpoint directory exists before training writes to it.
    os.makedirs(opts.save_dir, exist_ok=True)
    train(opts)