#!/usr/bin/env python3
"""
GPU LoRA训练脚本

使用LoRA方法训练SAM模型进行超声心动图分割
"""

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from accelerate import Accelerator
import wandb
from pathlib import Path
import sys
import json
import yaml
from tqdm import tqdm

# 添加项目路径
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root / 'src'))
sys.path.insert(0, str(project_root / 'external' / 'segment-anything'))

from utils.logger import get_logger
from utils.config import load_config
from data.dataset import EchoVideoDataset
from sam.lora.sam_lora import SAMLoRA
from sam.lora.lora_config import LoRAConfig


def setup_training():
    """Prepare the training environment.

    Returns:
        A ``(accelerator, lora_config, logger)`` tuple, or
        ``(None, None, None)`` when no CUDA device is available.
    """
    logger = get_logger("GPUTraining")

    # GPU-only script: bail out early when CUDA is missing.
    if not torch.cuda.is_available():
        logger.error("CUDA不可用，请检查GPU环境")
        return None, None, None

    # Delegate device placement / distributed setup to Accelerate.
    accelerator = Accelerator()
    logger.info(f"使用设备: {accelerator.device}")
    logger.info(f"GPU数量: {torch.cuda.device_count()}")

    # Load the YAML training config and derive the LoRA sub-config.
    config = load_config("configs/lora_training.yaml")
    lora_config = LoRAConfig.from_dict(config['lora'])

    # Only the main process reports to Weights & Biases.
    if accelerator.is_main_process:
        wandb.init(
            project="sam-echo-video",
            config=config,
            name="lora_training",
        )

    return accelerator, lora_config, logger


def load_datasets(lora_config: LoRAConfig):
    """Build train/val DataLoaders from the preprocessed split files.

    Returns:
        A ``(train_loader, val_loader)`` tuple, or ``(None, None)`` when
        the preprocessed data directory or its split files are missing.
    """
    logger = get_logger("DataLoader")

    # The preprocessing pipeline must have been run beforehand.
    data_dir = Path("data/processed/segmentation")
    if not data_dir.exists():
        logger.error("预处理数据不存在，请先运行数据预处理")
        return None, None

    split_files = {
        "train": data_dir / "train_split.json",
        "val": data_dir / "val_split.json",
    }
    if not all(path.exists() for path in split_files.values()):
        logger.error("数据划分文件不存在，请先运行数据预处理")
        return None, None

    # Read the sample lists for each split (insertion order: train, val).
    samples = {}
    for split, path in split_files.items():
        with open(path, 'r', encoding='utf-8') as f:
            samples[split] = json.load(f)['samples']

    logger.info(f"训练样本: {len(samples['train'])}")
    logger.info(f"验证样本: {len(samples['val'])}")

    def make_loader(split: str, shuffle: bool) -> DataLoader:
        # Both splits share every loader setting except shuffling.
        dataset = EchoVideoDataset(samples=samples[split], split=split)
        return DataLoader(
            dataset,
            batch_size=lora_config.batch_size,
            shuffle=shuffle,
            num_workers=4,
            pin_memory=True,
        )

    return make_loader("train", True), make_loader("val", False)


def create_model(lora_config: LoRAConfig, device: str):
    """Instantiate the LoRA-wrapped SAM model and log its parameter counts."""
    logger = get_logger("ModelCreation")

    # If the configured checkpoint file does not exist, warn and let the
    # model fall back to loading without one.
    checkpoint_path = lora_config.checkpoint_path
    if checkpoint_path and not Path(checkpoint_path).exists():
        logger.warning(f"检查点不存在: {checkpoint_path}")
        checkpoint_path = None

    model = SAMLoRA(
        base_model=lora_config.base_model,
        checkpoint_path=checkpoint_path,
        lora_config=lora_config,
        device=device,
    )

    # Report how small the trainable (LoRA) fraction is relative to the
    # full model.
    trainable, total = model.get_trainable_parameters()
    logger.info(f"可训练参数: {trainable:,}")
    logger.info(f"总参数: {total:,}")
    logger.info(f"可训练比例: {trainable/total*100:.2f}%")

    return model


def train_epoch(model, train_loader, optimizer, accelerator, lora_config, logger, epoch):
    """Run one training epoch and return the mean loss over its batches."""
    model.train()
    running_loss = 0.0
    seen_batches = 0

    progress = tqdm(train_loader, desc=f"Epoch {epoch}")

    for step_idx, batch in enumerate(progress):
        # Forward pass — the model computes its own loss.
        loss = model.training_step(batch)

        # Backward pass through the accelerator (handles mixed precision
        # and gradient scaling when configured).
        accelerator.backward(loss)

        # Clip only when gradients are synchronized (relevant under
        # gradient accumulation / multi-GPU setups).
        if accelerator.sync_gradients:
            accelerator.clip_grad_norm_(model.parameters(), max_norm=1.0)

        optimizer.step()
        optimizer.zero_grad()

        # Track the scalar loss once; reuse it everywhere below.
        batch_loss = loss.item()
        running_loss += batch_loss
        seen_batches += 1

        progress.set_postfix({
            'loss': f'{batch_loss:.4f}',
            'avg_loss': f'{running_loss/seen_batches:.4f}',
        })

        # Periodic console + wandb logging.
        if step_idx % lora_config.logging_steps == 0:
            logger.info(f"Epoch {epoch}, Batch {step_idx}, Loss: {batch_loss:.4f}")

            if accelerator.is_main_process:
                wandb.log({
                    "train_loss": batch_loss,
                    "epoch": epoch,
                    "batch": step_idx,
                })

    return running_loss / seen_batches


def validate(model, val_loader, accelerator, lora_config, logger, epoch):
    """Evaluate the model over the whole validation loader.

    Returns:
        The mean validation loss across all batches.
    """
    model.eval()
    batch_losses = []

    # No gradients are needed during evaluation.
    with torch.no_grad():
        for batch in val_loader:
            batch_losses.append(model.validation_step(batch).item())

    avg_loss = sum(batch_losses) / len(batch_losses)
    logger.info(f"Validation Loss: {avg_loss:.4f}")

    # Only the main process reports to wandb.
    if accelerator.is_main_process:
        wandb.log({
            "val_loss": avg_loss,
            "epoch": epoch,
        })

    return avg_loss


def save_checkpoint(model, optimizer, epoch, step, save_dir: Path, accelerator):
    """Persist the LoRA weights and the full accelerator state.

    Returns:
        A ``(lora_path, state_path)`` tuple with the two written paths.
    """
    save_dir.mkdir(parents=True, exist_ok=True)

    # Both artifacts share the same epoch/step-stamped suffix.
    suffix = f"epoch_{epoch}_step_{step}.pt"

    # LoRA adapter weights alone — small, sufficient for inference.
    lora_path = save_dir / f"lora_weights_{suffix}"
    model.save_lora_weights(str(lora_path))

    # Full accelerator training state for exact resumption.
    state_path = save_dir / f"checkpoint_{suffix}"
    accelerator.save_state(str(state_path))

    return lora_path, state_path


def main():
    """Train SAM with LoRA adapters on the echo segmentation dataset.

    Orchestrates environment setup, data loading, model creation, the
    epoch loop (train / periodic validation / periodic checkpointing),
    and final model saving. Exits early when the environment or the
    preprocessed data is not ready.
    """
    # Environment, config, and experiment tracking.
    accelerator, lora_config, logger = setup_training()
    if accelerator is None:
        return

    device = accelerator.device

    # Data.
    train_loader, val_loader = load_datasets(lora_config)
    if train_loader is None:
        return

    # Model.
    model = create_model(lora_config, device)

    # Passing all parameters to AdamW is harmless even though only the
    # LoRA parameters require grad — frozen ones receive no gradient.
    optimizer = torch.optim.AdamW(
        model.parameters(),
        lr=lora_config.learning_rate,
        weight_decay=0.01
    )

    # Cosine decay down to 1% of the base learning rate.
    # NOTE(review): T_max is expressed in optimizer *steps* but
    # scheduler.step() is called once per *epoch* below — confirm the
    # intended schedule.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer,
        T_max=lora_config.max_steps,
        eta_min=lora_config.learning_rate * 0.01
    )

    # Wrap everything for (multi-)GPU execution.
    model, optimizer, train_loader, val_loader = accelerator.prepare(
        model, optimizer, train_loader, val_loader
    )

    save_dir = Path("checkpoints/lora")
    save_dir.mkdir(parents=True, exist_ok=True)

    steps_per_epoch = len(train_loader)
    if steps_per_epoch == 0:
        # Guard: an empty loader would otherwise crash the divisions below.
        logger.error("Empty train_loader; aborting training")
        return

    # BUGFIX: the original used `eval_steps // steps_per_epoch` and
    # `save_steps // steps_per_epoch` directly as modulo divisors; when
    # the configured step count is smaller than one epoch the quotient
    # is 0 and `epoch % 0` raises ZeroDivisionError. Clamp both
    # intervals to at least one epoch.
    eval_every_epochs = max(1, lora_config.eval_steps // steps_per_epoch)
    save_every_epochs = max(1, lora_config.save_steps // steps_per_epoch)
    num_epochs = lora_config.max_steps // steps_per_epoch + 1

    logger.info("开始训练...")
    best_val_loss = float('inf')
    step = 0

    for epoch in range(num_epochs):
        # Train one epoch and surface its average loss (previously the
        # returned value was computed but never reported).
        train_loss = train_epoch(
            model, train_loader, optimizer, accelerator,
            lora_config, logger, epoch
        )
        logger.info(f"Epoch {epoch}, Train Loss: {train_loss:.4f}")

        # Periodic validation; keep the checkpoint with the best loss.
        if epoch % eval_every_epochs == 0:
            val_loss = validate(
                model, val_loader, accelerator,
                lora_config, logger, epoch
            )

            if val_loss < best_val_loss:
                best_val_loss = val_loss
                best_lora_path, _ = save_checkpoint(
                    model, optimizer, epoch, step, save_dir, accelerator
                )
                logger.info(f"新的最佳模型已保存: {best_lora_path}")

        # Periodic checkpoint, independent of validation quality.
        if epoch % save_every_epochs == 0:
            lora_path, _ = save_checkpoint(
                model, optimizer, epoch, step, save_dir, accelerator
            )
            logger.info(f"检查点已保存: {lora_path}")

        scheduler.step()

        # Advance the global step counter and stop at the step budget.
        step += steps_per_epoch
        if step >= lora_config.max_steps:
            break

    # Always save the final state regardless of validation results.
    final_lora_path, final_state_path = save_checkpoint(
        model, optimizer, epoch, step, save_dir, accelerator
    )

    logger.info("训练完成！")
    logger.info(f"最佳验证损失: {best_val_loss:.4f}")
    logger.info(f"最终模型: {final_lora_path}")

    if accelerator.is_main_process:
        wandb.finish()


if __name__ == "__main__":
    main()
