#!/usr/bin/env python3
"""
GPU训练环境配置脚本

配置GPU训练环境，下载SAM源码，设置LoRA训练
"""

import os
import sys
import subprocess
import requests
import zipfile
from pathlib import Path
import torch

# Add the project's src directory to the import path
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root / 'src'))

from utils.logger import get_logger


def check_gpu_availability():
    """Print a CUDA/GPU environment summary and report whether a GPU is usable.

    Returns:
        bool: True when CUDA is available, False otherwise.
    """
    divider = "=" * 60
    print(divider)
    print("GPU环境检查")
    print(divider)

    has_cuda = torch.cuda.is_available()
    print(f"CUDA可用: {has_cuda}")

    # Guard clause: bail out early when no CUDA runtime is present.
    if not has_cuda:
        print("❌ CUDA不可用，请安装CUDA和PyTorch GPU版本")
        return False

    print(f"CUDA版本: {torch.version.cuda}")
    print(f"GPU数量: {torch.cuda.device_count()}")
    for idx in range(torch.cuda.device_count()):
        props = torch.cuda.get_device_properties(idx)
        mem_gb = props.total_memory / 1024**3
        print(f"  GPU {idx}: {torch.cuda.get_device_name(idx)} ({mem_gb:.1f} GB)")

    return True


def install_accelerate_special():
    """Try several installation strategies for the `accelerate` package, in order.

    Strategies: conda-forge, pip via the Tsinghua mirror, pip via the Aliyun
    mirror, pip from the official index, and finally a pinned older version
    from the Tsinghua mirror.

    Returns:
        bool: True as soon as one strategy succeeds, False if all fail.
    """
    print("尝试多种方法安装accelerate...")

    pip = [sys.executable, "-m", "pip", "install"]
    # Each attempt: (start message, success message, failure message, command).
    attempts = [
        ("方法1: 使用conda安装accelerate...",
         "✅ accelerate conda安装成功",
         "⚠️ conda安装失败",
         ["conda", "install", "-y", "-c", "conda-forge", "accelerate"]),
        ("方法2: 使用pip + 清华镜像源安装accelerate...",
         "✅ accelerate pip安装成功",
         "⚠️ 清华镜像源安装失败",
         pip + ["-i", "https://pypi.tuna.tsinghua.edu.cn/simple", "accelerate>=0.20.0"]),
        ("方法3: 使用pip + 阿里云镜像源安装accelerate...",
         "✅ accelerate pip安装成功",
         "⚠️ 阿里云镜像源安装失败",
         pip + ["-i", "https://mirrors.aliyun.com/pypi/simple/", "accelerate>=0.20.0"]),
        ("方法4: 使用pip官方源安装accelerate...",
         "✅ accelerate pip安装成功",
         "⚠️ 官方源安装失败",
         pip + ["accelerate>=0.20.0"]),
        ("方法5: 安装accelerate较旧版本...",
         "✅ accelerate 0.21.0安装成功",
         "⚠️ 旧版本安装失败",
         pip + ["-i", "https://pypi.tuna.tsinghua.edu.cn/simple", "accelerate==0.21.0"]),
    ]

    for start_msg, ok_msg, fail_msg, cmd in attempts:
        print(start_msg)
        try:
            subprocess.run(cmd, check=True, capture_output=True)
        except (subprocess.CalledProcessError, FileNotFoundError):
            # FileNotFoundError: `conda` not on PATH — previously this crashed
            # the script instead of falling through to the pip methods.
            print(fail_msg)
            continue
        print(ok_msg)
        return True

    print("❌ 所有accelerate安装方法都失败")
    return False


def install_gpu_dependencies():
    """Install GPU training dependencies.

    Strategy: try conda (conda-forge) for the heavyweight packages first,
    install `accelerate` through its dedicated multi-source fallback, then
    use pip (Tsinghua mirror first, official index second) for the rest.

    Returns:
        bool: False only when a required pip package fails on every source;
        conda failures and an accelerate failure are non-fatal.
    """
    print("\n" + "=" * 60)
    print("安装GPU依赖")
    print("=" * 60)

    # Heavyweight packages best handled by conda.
    conda_packages = [
        "pytorch>=2.0.0",
        "torchvision>=0.15.0",
        "torchaudio>=2.0.0",
        "opencv",
        "scikit-image",
        "albumentations",
        "transformers",
        "timm",
        "wandb"
    ]

    print("使用conda安装主要依赖...")
    for package in conda_packages:
        print(f"安装 {package}...")
        try:
            subprocess.run([
                "conda", "install", "-y", "-c", "conda-forge", package
            ], check=True, capture_output=True)
            print(f"✅ {package} 安装成功")
        except (subprocess.CalledProcessError, FileNotFoundError):
            # FileNotFoundError covers the case where conda is not installed
            # at all — previously this aborted the whole script.
            print(f"⚠️ {package} conda安装失败，将使用pip安装")

    # accelerate gets its own multi-source fallback chain.
    print("\n安装accelerate...")
    if not install_accelerate_special():
        print("❌ accelerate安装失败，但继续安装其他依赖...")

    # Remaining packages installed via pip.
    pip_packages = [
        "peft>=0.4.0",  # LoRA support
        "bitsandbytes>=0.41.0",  # quantization support
        "segment-anything>=1.0",
    ]

    # Preferred pip mirror (faster inside China).
    pip_index_url = "https://pypi.tuna.tsinghua.edu.cn/simple"

    for package in pip_packages:
        print(f"使用pip安装 {package}...")
        try:
            # Try the Tsinghua mirror first.
            subprocess.run([
                sys.executable, "-m", "pip", "install",
                "-i", pip_index_url, package
            ], check=True, capture_output=True)
            print(f"✅ {package} 安装成功")
        except subprocess.CalledProcessError:
            print(f"⚠️ 使用清华镜像源安装 {package} 失败，尝试官方源...")
            try:
                subprocess.run([
                    sys.executable, "-m", "pip", "install", package
                ], check=True, capture_output=True)
                print(f"✅ {package} 安装成功")
            except subprocess.CalledProcessError as e2:
                print(f"❌ {package} 安装失败: {e2}")
                return False

    return True


def verify_installation():
    """Import each critical package to confirm the installation, printing a report.

    Returns:
        bool: True when every critical package imports cleanly.
    """
    print("\n" + "=" * 60)
    print("验证安装")
    print("=" * 60)

    # Packages whose absence would break training.
    critical_packages = [
        "torch",
        "torchvision",
        "torchaudio",
        "accelerate",
        "peft",
        "transformers"
    ]

    missing = []
    for name in critical_packages:
        try:
            __import__(name)
        except ImportError:
            print(f"❌ {name} 不可用")
            missing.append(name)
        else:
            print(f"✅ {name} 可用")

    if not missing:
        print("\n✅ 所有关键包安装成功！")
        return True

    print(f"\n⚠️ 以下包安装失败: {', '.join(missing)}")
    print("建议手动安装:")
    for name in missing:
        # accelerate installs most reliably from conda-forge.
        if name == "accelerate":
            print(f"  conda install -c conda-forge {name}")
        else:
            print(f"  pip install {name}")
    return False


def download_sam_repositories():
    """Clone the SAM and SAM2 source repositories into <project>/external.

    Repositories that already exist on disk are left untouched.

    Returns:
        bool: True when both repositories are present, False on a clone error.
    """
    print("\n" + "=" * 60)
    print("下载SAM源码")
    print("=" * 60)

    external_dir = project_root / "external"
    external_dir.mkdir(exist_ok=True)

    # (label, clone URL, target directory)
    repos = [
        ("SAM1", "https://github.com/facebookresearch/segment-anything.git",
         external_dir / "segment-anything"),
        ("SAM2", "https://github.com/facebookresearch/segment-anything-2.git",
         external_dir / "segment-anything-2"),
    ]

    for label, url, target in repos:
        if target.exists():
            print(f"✅ {label}源码已存在")
            continue
        print(f"下载{label}源码...")
        try:
            subprocess.run(["git", "clone", url, str(target)], check=True)
        except subprocess.CalledProcessError as e:
            print(f"❌ {label}源码下载失败: {e}")
            return False
        print(f"✅ {label}源码下载成功")

    return True


def _download_file(url, dest_path):
    """Stream *url* into *dest_path* in 8 KiB chunks.

    Removes any partially-written file before re-raising, so an interrupted
    download is never mistaken for a finished checkpoint on the next run.
    """
    try:
        response = requests.get(url, stream=True)
        response.raise_for_status()
        with open(dest_path, 'wb') as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)
    except Exception:
        # Bug fix: previously a failed download left a truncated file behind,
        # and the existence check below would then skip re-downloading it.
        if dest_path.exists():
            dest_path.unlink()
        raise


def download_sam_checkpoints():
    """Download SAM1/SAM2 pretrained checkpoints into <project>/checkpoints/sam.

    Individual download failures are reported but do not abort the rest.

    Returns:
        bool: always True (download failures are non-fatal).
    """
    print("\n" + "=" * 60)
    print("下载SAM预训练模型")
    print("=" * 60)

    checkpoints_dir = project_root / "checkpoints" / "sam"
    checkpoints_dir.mkdir(parents=True, exist_ok=True)

    # SAM1 checkpoints (the smaller variants).
    sam1_models = {
        "sam_vit_b_01ec64.pth": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth",
        "sam_vit_l_0b3195.pth": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth"
    }

    # SAM2 checkpoints live in a dedicated subdirectory.
    sam2_models = {
        "sam2_hiera_large.pt": "https://dl.fbaipublicfiles.com/segment_anything_2/072824/sam2_hiera_large.pt",
        "sam2_hiera_base_plus.pt": "https://dl.fbaipublicfiles.com/segment_anything_2/072824/sam2_hiera_base_plus.pt"
    }

    sam2_dir = checkpoints_dir / "sam2"
    sam2_dir.mkdir(exist_ok=True)

    for target_dir, models in ((checkpoints_dir, sam1_models), (sam2_dir, sam2_models)):
        for model_name, url in models.items():
            model_path = target_dir / model_name
            if model_path.exists():
                print(f"✅ {model_name} 已存在")
                continue
            print(f"下载 {model_name}...")
            try:
                _download_file(url, model_path)
                print(f"✅ {model_name} 下载成功")
            except Exception as e:
                print(f"❌ {model_name} 下载失败: {e}")

    return True


def create_lora_config():
    """Write the default LoRA fine-tuning configuration to configs/lora_training.yaml.

    Returns:
        bool: True once the file has been written.
    """
    print("\n" + "=" * 60)
    print("创建LoRA配置")
    print("=" * 60)

    lora_config = {
        "lora": {
            "r": 16,  # LoRA rank
            "lora_alpha": 32,  # LoRA scaling factor
            "target_modules": [
                "q_proj", "k_proj", "v_proj", "o_proj",  # attention projections
                "gate_proj", "up_proj", "down_proj"  # MLP projections
            ],
            "lora_dropout": 0.1,
            "bias": "none",
            "task_type": "FEATURE_EXTRACTION"
        },
        "training": {
            "learning_rate": 1e-4,
            "batch_size": 4,
            "gradient_accumulation_steps": 4,
            "max_steps": 1000,
            "warmup_steps": 100,
            "save_steps": 200,
            "eval_steps": 200,
            "logging_steps": 50
        },
        "model": {
            "base_model": "sam_vit_b",  # smaller SAM variant
            "checkpoint_path": "checkpoints/sam/sam_vit_b_01ec64.pth"
        }
    }

    import yaml  # local import: earlier setup steps must run even without PyYAML

    config_file = project_root / "configs" / "lora_training.yaml"
    # Bug fix: on a fresh checkout configs/ may not exist, and open() would
    # raise FileNotFoundError.
    config_file.parent.mkdir(parents=True, exist_ok=True)

    with open(config_file, 'w', encoding='utf-8') as f:
        yaml.dump(lora_config, f, default_flow_style=False, allow_unicode=True)

    print(f"✅ LoRA配置已保存: {config_file}")
    return True


def create_gpu_training_script():
    """Generate scripts/train_gpu_lora.py, a standalone LoRA training script.

    The embedded source is written verbatim to disk; it is runtime data and
    must not be reformatted.

    Returns:
        bool: True once the script file has been written.
    """
    print("\n" + "=" * 60)
    print("创建GPU训练脚本")
    print("=" * 60)

    training_script = '''#!/usr/bin/env python3
"""
GPU训练脚本

使用LoRA方法训练SAM模型
"""

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from peft import LoraConfig, get_peft_model, TaskType
from accelerate import Accelerator
import wandb
from pathlib import Path
import sys

# 添加项目路径
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root / 'src'))
sys.path.insert(0, str(project_root / 'external' / 'segment-anything'))

from utils.logger import get_logger
from utils.config import load_config
from data.dataset import EchoVideoDataset
from sam.lora.sam_lora import SAMLoRA


def main():
    """主训练函数"""
    logger = get_logger("GPUTraining")
    
    # 初始化accelerator
    accelerator = Accelerator()
    device = accelerator.device
    
    logger.info(f"使用设备: {device}")
    
    # 加载配置
    config = load_config("configs/lora_training.yaml")
    
    # 初始化wandb
    if accelerator.is_main_process:
        wandb.init(
            project="sam-echo-video",
            config=config,
            name="lora_training"
        )
    
    # 加载数据集
    train_dataset = EchoVideoDataset(
        data_dir="data/processed/segmentation",
        split="train"
    )
    
    val_dataset = EchoVideoDataset(
        data_dir="data/processed/segmentation", 
        split="val"
    )
    
    train_loader = DataLoader(
        train_dataset,
        batch_size=config['training']['batch_size'],
        shuffle=True,
        num_workers=4
    )
    
    val_loader = DataLoader(
        val_dataset,
        batch_size=config['training']['batch_size'],
        shuffle=False,
        num_workers=4
    )
    
    # 初始化模型
    model = SAMLoRA(
        base_model=config['model']['base_model'],
        checkpoint_path=config['model']['checkpoint_path'],
        lora_config=config['lora']
    )
    
    # 准备训练
    optimizer = torch.optim.AdamW(
        model.parameters(),
        lr=config['training']['learning_rate']
    )
    
    model, optimizer, train_loader, val_loader = accelerator.prepare(
        model, optimizer, train_loader, val_loader
    )
    
    # 训练循环
    model.train()
    step = 0
    
    for epoch in range(config['training']['max_steps'] // len(train_loader)):
        for batch in train_loader:
            # 前向传播
            loss = model.training_step(batch)
            
            # 反向传播
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            
            step += 1
            
            # 日志记录
            if step % config['training']['logging_steps'] == 0:
                logger.info(f"Step {step}, Loss: {loss.item():.4f}")
                if accelerator.is_main_process:
                    wandb.log({"train_loss": loss.item(), "step": step})
            
            # 验证
            if step % config['training']['eval_steps'] == 0:
                model.eval()
                val_loss = 0
                val_steps = 0
                
                with torch.no_grad():
                    for val_batch in val_loader:
                        val_loss += model.validation_step(val_batch)
                        val_steps += 1
                
                val_loss /= val_steps
                logger.info(f"Validation Loss: {val_loss:.4f}")
                
                if accelerator.is_main_process:
                    wandb.log({"val_loss": val_loss, "step": step})
                
                model.train()
            
            # 保存检查点
            if step % config['training']['save_steps'] == 0:
                accelerator.save_state(f"checkpoints/lora/checkpoint-{step}")
                logger.info(f"检查点已保存: checkpoint-{step}")
            
            if step >= config['training']['max_steps']:
                break
        
        if step >= config['training']['max_steps']:
            break
    
    # 保存最终模型
    accelerator.save_state("checkpoints/lora/final")
    logger.info("训练完成！")


if __name__ == "__main__":
    main()
'''
    
    script_path = project_root / "scripts" / "train_gpu_lora.py"
    # Bug fix: create the scripts directory if it does not exist yet,
    # otherwise open() raises FileNotFoundError on a fresh checkout.
    script_path.parent.mkdir(parents=True, exist_ok=True)
    with open(script_path, 'w', encoding='utf-8') as f:
        f.write(training_script)
    
    print(f"✅ GPU训练脚本已创建: {script_path}")
    return True


def main():
    """Run the full GPU environment setup pipeline, step by step.

    Returns:
        bool: True when every fatal step succeeds.
    """
    logger = get_logger("GPUEnvironmentSetup")

    banner = "=" * 60
    print(banner)
    print("SAM-Echo-Video GPU环境配置")
    print(banner)

    # (step function, message printed on failure, abort-on-failure flag)
    steps = [
        (check_gpu_availability,
         "❌ GPU环境检查失败，请先安装CUDA和PyTorch GPU版本", True),
        (install_gpu_dependencies,
         "❌ GPU依赖安装失败", True),
        (verify_installation,
         "⚠️ 部分依赖安装失败，但继续配置...", False),
        (download_sam_repositories,
         "❌ SAM源码下载失败", True),
        (download_sam_checkpoints,
         "❌ SAM预训练模型下载失败", True),
        (create_lora_config,
         "❌ LoRA配置创建失败", True),
        (create_gpu_training_script,
         "❌ GPU训练脚本创建失败", True),
    ]

    for step, failure_message, fatal in steps:
        if not step():
            print(failure_message)
            if fatal:
                return False

    print("\n" + banner)
    print("✅ GPU环境配置完成！")
    print(banner)

    print("\n下一步:")
    print("1. 运行完整模式数据预处理: python scripts/preprocessing/preprocess_echo.py --full_mode")
    print("2. 开始LoRA训练: python scripts/train_gpu_lora.py")
    print("3. 监控训练进度: wandb")

    return True


if __name__ == "__main__":
    # Exit with a non-zero status so callers (CI, shell scripts) see failures.
    if main():
        print("\n🎉 GPU环境配置成功！")
    else:
        print("\n❌ GPU环境配置失败")
        sys.exit(1)
