#!/usr/bin/env python3
"""
视觉学习提示模块训练脚本

训练周期性、质量感知和记忆掩码提示模块
"""

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from accelerate import Accelerator
import wandb
from pathlib import Path
import sys
import json
import yaml
from tqdm import tqdm
import numpy as np

# 添加项目路径
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root / 'src'))

from utils.logger import get_logger
from utils.config import load_config
from data.dataset import EchoVideoDataset
from sam.prompt.visual_learning_prompt import VisualLearningPromptGenerator


def setup_training():
    """Initialize the training environment: logger, accelerator, config, wandb.

    Returns:
        A ``(accelerator, config, logger)`` tuple, or ``(None, None, None)``
        when CUDA is unavailable.
    """
    logger = get_logger("VisualPromptTraining")

    # A GPU is required for this training script.
    if not torch.cuda.is_available():
        logger.error("CUDA不可用，请检查GPU环境")
        return None, None, None

    accelerator = Accelerator()

    logger.info(f"使用设备: {accelerator.device}")
    logger.info(f"GPU数量: {torch.cuda.device_count()}")

    config = load_config("configs/visual_prompt_training.yaml")

    # Only the main process reports to Weights & Biases.
    if accelerator.is_main_process:
        wandb.init(
            project="sam-echo-video-visual-prompts",
            name="visual_prompt_training",
            config=config,
        )

    return accelerator, config, logger


def load_datasets(config):
    """Build train/val DataLoaders from the preprocessed segmentation splits.

    Returns:
        ``(train_loader, val_loader)``, or ``(None, None)`` when the
        preprocessed data directory or the split files are missing.
    """
    logger = get_logger("DataLoader")

    # Preprocessing must have been run beforehand.
    data_dir = Path("data/processed/segmentation")
    if not data_dir.exists():
        logger.error("预处理数据不存在，请先运行数据预处理")
        return None, None

    split_files = {name: data_dir / f"{name}_split.json" for name in ("train", "val")}
    if not all(path.exists() for path in split_files.values()):
        logger.error("数据划分文件不存在，请先运行数据预处理")
        return None, None

    # Each split file stores its sample list under the "samples" key.
    samples = {}
    for name, path in split_files.items():
        with open(path, 'r', encoding='utf-8') as f:
            samples[name] = json.load(f)['samples']

    logger.info(f"训练样本: {len(samples['train'])}")
    logger.info(f"验证样本: {len(samples['val'])}")

    # Shared DataLoader settings; only shuffling differs between splits.
    loader_kwargs = dict(
        batch_size=config['training']['batch_size'],
        num_workers=config['data']['num_workers'],
        pin_memory=config['data']['pin_memory'],
    )

    train_loader = DataLoader(
        EchoVideoDataset(samples=samples['train'], split="train"),
        shuffle=True,
        **loader_kwargs,
    )

    val_loader = DataLoader(
        EchoVideoDataset(samples=samples['val'], split="val"),
        shuffle=False,
        **loader_kwargs,
    )

    return train_loader, val_loader


def create_model(config, device):
    """Instantiate the visual-learning prompt generator described by *config*.

    Args:
        config: Parsed training configuration with a ``model`` section.
        device: Target device for the model.

    Returns:
        The constructed ``VisualLearningPromptGenerator``.
    """
    logger = get_logger("ModelCreation")

    model_cfg = config['model']
    model = VisualLearningPromptGenerator(
        input_dim=model_cfg['input_dim'],
        prompt_dim=model_cfg['prompt_dim'],
        enable_periodic=model_cfg['enable_periodic'],
        enable_quality=model_cfg['enable_quality'],
        enable_memory=model_cfg['enable_memory'],
        enable_self_prompt=model_cfg['enable_self_prompt'],
        fusion_method=model_cfg['fusion_method'],
        device=device,
    )

    # Log a summary of the constructed model and the state of its sub-modules.
    logger.info(f"模型信息: {model.get_model_info()}")
    logger.info(f"模块状态: {model.get_module_status()}")

    return model


def train_epoch(model, train_loader, optimizer, accelerator, config, logger, epoch):
    """Run one training epoch and return the mean batch loss.

    Args:
        model: Prompt-generator model exposing ``generate_prompts`` and
            ``compute_visual_learning_loss``.
        train_loader: Training DataLoader (already prepared by accelerate).
        optimizer: Optimizer for the model parameters.
        accelerator: ``accelerate.Accelerator`` handling backward/clipping.
        config: Training configuration (``training.max_grad_norm`` etc.).
        logger: Logger for periodic progress messages.
        epoch: Current epoch index (for logging only).

    Returns:
        Average loss over the epoch, or 0.0 for an empty dataloader
        (the original raised ZeroDivisionError in that case).
    """
    model.train()
    total_loss = 0.0
    num_batches = 0

    progress_bar = tqdm(train_loader, desc=f"Epoch {epoch}")

    for batch_idx, batch in enumerate(progress_bar):
        # Forward pass: prompts from the image; metadata is optional.
        prompts = model.generate_prompts(batch['image'], batch.get('metadata', {}))

        loss = model.compute_visual_learning_loss(prompts, batch)

        accelerator.backward(loss)

        # Clip only when gradients are actually synchronized
        # (gradient-accumulation aware).
        if accelerator.sync_gradients:
            accelerator.clip_grad_norm_(model.parameters(), max_norm=config['training']['max_grad_norm'])

        optimizer.step()
        optimizer.zero_grad()

        # Read the loss once: .item() forces a device sync, and the original
        # called it up to three times per batch.
        batch_loss = loss.item()
        total_loss += batch_loss
        num_batches += 1

        progress_bar.set_postfix({
            'loss': f'{batch_loss:.4f}',
            'avg_loss': f'{total_loss/num_batches:.4f}'
        })

        # Periodic logging to the logger and (main process only) wandb.
        if batch_idx % config['training']['logging_steps'] == 0:
            logger.info(f"Epoch {epoch}, Batch {batch_idx}, Loss: {batch_loss:.4f}")

            if accelerator.is_main_process:
                wandb.log({
                    "train_loss": batch_loss,
                    "epoch": epoch,
                    "batch": batch_idx
                })

    # Guard against an empty dataloader (ZeroDivisionError in the original).
    return total_loss / num_batches if num_batches else 0.0


def validate(model, val_loader, accelerator, config, logger, epoch):
    """Evaluate the model on the validation set and return the mean loss.

    Args:
        model: Prompt-generator model under evaluation.
        val_loader: Validation DataLoader.
        accelerator: Accelerator (used only to gate wandb logging).
        config: Training configuration (currently unused; kept for
            signature parity with ``train_epoch``).
        logger: Logger for the summary message.
        epoch: Current epoch index (for logging only).

    Returns:
        Average validation loss, or ``inf`` for an empty dataloader
        (the original raised ZeroDivisionError in that case).
    """
    model.eval()
    total_loss = 0.0
    num_batches = 0

    # No gradients needed during evaluation.
    with torch.no_grad():
        for batch in val_loader:
            prompts = model.generate_prompts(batch['image'], batch.get('metadata', {}))
            loss = model.compute_visual_learning_loss(prompts, batch)
            total_loss += loss.item()
            num_batches += 1

    # Guard against an empty dataloader (ZeroDivisionError in the original).
    avg_loss = total_loss / num_batches if num_batches else float('inf')
    logger.info(f"Validation Loss: {avg_loss:.4f}")

    if accelerator.is_main_process:
        wandb.log({
            "val_loss": avg_loss,
            "epoch": epoch
        })

    return avg_loss


def save_checkpoint(model, optimizer, epoch, save_dir: Path, accelerator):
    """Persist model weights, optimizer state, and full accelerator state.

    Args:
        model: Model exposing its own ``save_checkpoint(path)`` method.
        optimizer: Optimizer whose ``state_dict`` is saved via ``torch.save``.
        epoch: Epoch index embedded in all file names.
        save_dir: Directory that receives the checkpoint artifacts.
        accelerator: Accelerator whose ``save_state`` snapshots training state.

    Returns:
        ``(model_path, optimizer_path, state_path)``.
    """
    save_dir.mkdir(parents=True, exist_ok=True)

    # Model weights (the model handles its own serialization format).
    model_path = save_dir / f"visual_prompt_model_epoch_{epoch}.pt"
    model.save_checkpoint(str(model_path))

    # Optimizer state.
    optimizer_path = save_dir / f"optimizer_epoch_{epoch}.pt"
    torch.save(optimizer.state_dict(), optimizer_path)

    # Full training state. accelerator.save_state() writes a *directory*,
    # so use a directory-style name; the original's ".pt" suffix produced
    # a directory misleadingly named like a file.
    state_path = save_dir / f"checkpoint_epoch_{epoch}"
    accelerator.save_state(str(state_path))

    return model_path, optimizer_path, state_path


def main():
    """Main training loop for the visual-learning prompt modules."""
    # Environment setup; aborts when CUDA is unavailable.
    accelerator, config, logger = setup_training()
    if accelerator is None:
        return

    device = accelerator.device

    # Data loaders; aborts when preprocessing has not been run.
    train_loader, val_loader = load_datasets(config)
    if train_loader is None:
        return

    model = create_model(config, device)

    optimizer = torch.optim.AdamW(
        model.parameters(),
        lr=config['training']['learning_rate'],
        weight_decay=config['training']['weight_decay']
    )

    # Cosine annealing down to 1% of the base learning rate.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer,
        T_max=config['training']['max_epochs'],
        eta_min=config['training']['learning_rate'] * 0.01
    )

    # Wrap for (multi-)GPU / mixed-precision execution.
    model, optimizer, train_loader, val_loader = accelerator.prepare(
        model, optimizer, train_loader, val_loader
    )

    save_dir = Path("checkpoints/visual_prompts")
    save_dir.mkdir(parents=True, exist_ok=True)

    logger.info("开始训练视觉学习提示模块...")
    best_val_loss = float('inf')
    patience_counter = 0
    # Fix: the original raised NameError at the final save_checkpoint call
    # when max_epochs == 0 because `epoch` was never bound.
    epoch = -1

    for epoch in range(config['training']['max_epochs']):
        # NOTE(review): train_loss is computed but not logged per-epoch,
        # matching the original behavior.
        train_loss = train_epoch(
            model, train_loader, optimizer, accelerator,
            config, logger, epoch
        )

        # Periodic validation with early stopping on validation loss.
        if epoch % config['training']['eval_epochs'] == 0:
            val_loss = validate(
                model, val_loader, accelerator,
                config, logger, epoch
            )

            if val_loss < best_val_loss:
                best_val_loss = val_loss
                patience_counter = 0

                # Keep a checkpoint of the best model so far.
                best_model_path, _, _ = save_checkpoint(
                    model, optimizer, epoch, save_dir, accelerator
                )
                logger.info(f"新的最佳模型已保存: {best_model_path}")
            else:
                patience_counter += 1

                if patience_counter >= config['training']['early_stopping_patience']:
                    logger.info(f"早停触发，在第 {epoch} 轮停止训练")
                    break

        # Periodic checkpointing, independent of validation.
        if epoch % config['training']['save_epochs'] == 0:
            model_path, _, _ = save_checkpoint(
                model, optimizer, epoch, save_dir, accelerator
            )
            logger.info(f"检查点已保存: {model_path}")

        scheduler.step()

        # Track the learning-rate schedule (main process only).
        if accelerator.is_main_process:
            wandb.log({
                "learning_rate": scheduler.get_last_lr()[0],
                "epoch": epoch
            })

    logger.info("训练完成！")
    logger.info(f"最佳验证损失: {best_val_loss:.4f}")

    # Save the final state only when at least one epoch actually ran.
    if epoch >= 0:
        final_model_path, _, _ = save_checkpoint(
            model, optimizer, epoch, save_dir, accelerator
        )
        logger.info(f"最终模型: {final_model_path}")

    if accelerator.is_main_process:
        wandb.finish()

if __name__ == "__main__":
    main()
