import torch
import torch.nn as nn
import torch.optim as optim
import argparse
import os
from models.vgg_ft import VGGFT
from metrics.correlation_metrics import CorrelationMetrics
from datasets.ft_dataloader_tid2013 import create_dataloader

def train_model(model, train_loader, val_loader, criterion, optimizer, num_epochs, device,
                checkpoint_path='best_model.pth'):
    """Train and validate the model, checkpointing the epoch with the best SROCC.

    Args:
        model: network to train (moved onto ``device`` in place).
        train_loader: training DataLoader yielding dict batches with
            'data' (inputs) and 'label' (targets) tensors.
        val_loader: validation DataLoader with the same batch format.
        criterion: loss function (e.g. ``nn.MSELoss``).
        optimizer: optimizer updating ``model``'s parameters.
        num_epochs: number of full passes over ``train_loader``.
        device: torch.device to run on (CPU/GPU).
        checkpoint_path: where to save the best model's ``state_dict``;
            a relative path resolves against the current working directory.

    Returns:
        float: the best validation SROCC observed across all epochs
        (-1.0 if ``num_epochs`` is 0).
    """
    model.to(device)
    best_srocc = -1.0

    for epoch in range(num_epochs):
        # ---- training phase ----
        model.train()
        train_loss = 0.0

        for batch in train_loader:
            inputs = batch['data'].to(device)
            labels = batch['label'].to(device)

            # standard step: clear grads, forward, loss, backward, update
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            train_loss += loss.item()

        avg_train_loss = train_loss / len(train_loader)

        # ---- validation phase ----
        model.eval()
        val_loss = 0.0
        all_preds = []
        all_labels = []

        with torch.no_grad():
            for batch in val_loader:
                inputs = batch['data'].to(device)
                labels = batch['label'].to(device)

                outputs = model(inputs)
                val_loss += criterion(outputs, labels).item()

                # collect predictions and targets for whole-set correlations
                all_preds.append(outputs)
                all_labels.append(labels)

        # concatenate all validation batches before computing correlations
        all_preds = torch.cat(all_preds, dim=0)
        all_labels = torch.cat(all_labels, dim=0)

        # rank (SROCC) and linear (LCC) correlation coefficients
        srocc, lcc = CorrelationMetrics.compute(all_preds, all_labels)

        avg_val_loss = val_loss / len(val_loader)

        print(f'Epoch {epoch+1}/{num_epochs}:')
        print(f'  Train Loss: {avg_train_loss:.4f}')
        print(f'  Val Loss: {avg_val_loss:.4f}')
        print(f'  SROCC: {srocc:.4f}, LCC: {lcc:.4f}')

        # checkpoint whenever validation SROCC improves
        if srocc > best_srocc:
            best_srocc = srocc
            torch.save(model.state_dict(), checkpoint_path)
            print(f'  保存新的最佳模型，SROCC: {srocc:.4f}')

        print()

    return best_srocc

def parse_args(argv=None):
    """Parse command-line options for the fine-tuning run.

    Args:
        argv: optional list of argument strings; ``None`` (the default)
            falls back to ``sys.argv[1:]``. Passing an explicit list makes
            this function testable without touching the process arguments.

    Returns:
        argparse.Namespace with ``rank_model``, ``data_root``,
        ``batch_size``, ``epochs`` and ``output_dir``.
    """
    parser = argparse.ArgumentParser(description='使用RankIQA预训练模型进行微调')
    parser.add_argument('--rank_model', type=str, default='/workspace/myRankIQA/checkpoints/checkpoint_epoch_10.pth', 
                        help='RankIQA预训练模型路径')
    parser.add_argument('--data_root', type=str, 
                        default='/workspace/myRankIQA/data/score_tid2013',
                        help='微调数据集根目录')
    parser.add_argument('--batch_size', type=int, default=30, 
                        help='批次大小')
    parser.add_argument('--epochs', type=int, default=10, 
                        help='训练轮数')
    parser.add_argument('--output_dir', type=str, default='./', 
                        help='输出目录')
    return parser.parse_args(argv)

def main():
    """Entry point: build the model, data and optimizer, then fine-tune.

    Loads RankIQA pre-trained weights when available, fine-tunes on the
    configured dataset, and writes the best checkpoint into the output
    directory.
    """
    args = parse_args()

    # Select GPU when available, otherwise fall back to CPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Build the model without ImageNet weights; they are only the fallback
    # when no RankIQA checkpoint is supplied below.
    model = VGGFT(pretrained=False)

    # Load RankIQA pre-trained weights if a valid checkpoint path was given.
    if args.rank_model and os.path.exists(args.rank_model):
        print(f"加载RankIQA预训练模型: {args.rank_model}")
        model.load_from_rank_model(args.rank_model)
    else:
        print("未提供RankIQA预训练模型，使用ImageNet预训练权重")

    # Plain MSE regression loss on the predicted quality score.
    criterion = nn.MSELoss()

    # Per-parameter-group learning rates come from the model itself
    # (mirrors the original Caffe learning-rate policy).
    param_groups = model.get_parameter_groups()
    optimizer = optim.SGD(param_groups, momentum=0.9)

    # Build train/val loaders over the same data root.
    train_loader = create_dataloader(
        pascal_root=args.data_root,
        split="train",
        batch_size=args.batch_size,
        im_shape=(224, 224)
    )

    val_loader = create_dataloader(
        pascal_root=args.data_root,
        split="val",
        batch_size=args.batch_size,
        im_shape=(224, 224)
    )

    # Make sure the output directory exists before switching into it.
    os.makedirs(args.output_dir, exist_ok=True)

    # Train from inside the output directory so relative checkpoint paths
    # ('best_model.pth') land there; always restore the original working
    # directory, even if training raises.
    original_dir = os.getcwd()
    os.chdir(args.output_dir)
    try:
        train_model(
            model=model,
            train_loader=train_loader,
            val_loader=val_loader,
            criterion=criterion,
            optimizer=optimizer,
            num_epochs=args.epochs,
            device=device
        )
    finally:
        os.chdir(original_dir)

# Run the fine-tuning pipeline only when executed as a script.
if __name__ == "__main__":
    main()