#!/usr/bin/env python3
"""
vLLM 模型配置文件 - 优化显存使用
vLLM Model Configuration - Optimized for Memory Usage
"""

import os
from typing import Dict, Any, Optional

class ModelConfig:
    """Model configuration presets for vLLM engine initialization.

    Each *_CONFIG dict holds keyword arguments intended for the vLLM
    engine; pick one via AppConfig.MODEL_CONFIG_TYPE / get_model_config().
    """

    # Default model path; override at runtime via the MODEL_PATH env var.
    DEFAULT_MODEL_PATH = "~/ProjectWorkspace/shellMakefileDeploymentProjects/linuxDeployProjects/vllmProjects/huggingface_download_tool/models/ByteDance-Seed/Seed-X-PPO-7B"

    # Memory-optimized preset: tuned for loading a ~13GB model on 24GB VRAM.
    MEMORY_OPTIMIZED_CONFIG = {
        "max_num_seqs": 32,                 # fewer concurrent sequences
        "tensor_parallel_size": 1,          # single-GPU mode
        "enable_prefix_caching": False,     # avoid conflict with sliding window
        "disable_sliding_window": False,    # keep sliding window (some models need it)
        "gpu_memory_utilization": 0.75,     # cap VRAM usage at 75%
        "max_model_len": 4096,              # cap max sequence length
        "enforce_eager": True,              # eager mode reduces memory overhead
        "disable_custom_all_reduce": True,  # disable custom all_reduce kernel
        "swap_space": 4,                    # 4GB CPU swap space
        "cpu_offload_gb": 2,                # offload 2GB of weights to CPU
    }

    # High-performance preset: use only when VRAM is plentiful.
    HIGH_PERFORMANCE_CONFIG = {
        "max_num_seqs": 128,
        "tensor_parallel_size": 1,
        "enable_prefix_caching": False,     # avoid conflict with sliding window
        "disable_sliding_window": False,    # keep sliding window
        "gpu_memory_utilization": 0.85,
        "max_model_len": 8192,
        "enforce_eager": False,
        "disable_custom_all_reduce": True,
    }

    # Low-memory preset: for GPUs without enough free VRAM.
    LOW_MEMORY_CONFIG = {
        "max_num_seqs": 16,
        "tensor_parallel_size": 1,
        "enable_prefix_caching": False,     # avoid conflict with sliding window
        "disable_sliding_window": False,    # keep sliding window
        "gpu_memory_utilization": 0.6,
        "max_model_len": 2048,
        "enforce_eager": True,
        "disable_custom_all_reduce": True,
        "swap_space": 8,
        "cpu_offload_gb": 4,
    }

    # Multi-GPU preset: for hosts with more than one GPU available.
    MULTI_GPU_CONFIG = {
        "max_num_seqs": 256,
        "tensor_parallel_size": 2,          # adjust to the actual GPU count
        "enable_prefix_caching": False,     # avoid conflict with sliding window
        "disable_sliding_window": False,    # keep sliding window
        "gpu_memory_utilization": 0.9,
        "max_model_len": 8192,
        "enforce_eager": False,
        "disable_custom_all_reduce": False,
    }

    # Sliding-window-compatible preset: targets models that rely on a
    # sliding attention window (e.g. Seed-X-PPO-7B).
    SLIDING_WINDOW_CONFIG = {
        "max_num_seqs": 24,                 # even fewer concurrent sequences
        "tensor_parallel_size": 1,          # single-GPU mode
        "enable_prefix_caching": False,     # prefix caching must stay off
        "disable_sliding_window": False,    # keep the sliding-window mechanism
        "gpu_memory_utilization": 0.7,      # more conservative VRAM budget
        "max_model_len": 3072,              # shorter sequence length
        "enforce_eager": True,              # eager mode reduces memory overhead
        "disable_custom_all_reduce": True,  # disable custom all_reduce kernel
        "swap_space": 6,                    # larger swap space
        "cpu_offload_gb": 3,                # larger CPU offload
        "trust_remote_code": True,          # NOTE: executes model repo code; required by some models
    }

class SamplingConfig:
    """Sampling parameter presets, selectable via get_sampling_config()."""

    # Default preset: temperature 0.0 gives greedy decoding.
    DEFAULT_SAMPLING_PARAMS = {
        "temperature": 0.0,             # greedy decoding
        "max_tokens": 512,              # generation cap
        "skip_special_tokens": True,    # drop special tokens from output
        "top_p": 0.9,                   # nucleus sampling threshold
        "top_k": 50,                    # top-k sampling cutoff
    }

    # Creative preset: higher temperature and wider sampling pool.
    CREATIVE_SAMPLING_PARAMS = {
        "temperature": 0.8,
        "max_tokens": 1024,
        "skip_special_tokens": True,
        "top_p": 0.95,
        "top_k": 100,
        "repetition_penalty": 1.1,
    }

    # Precise preset: low temperature, tight sampling, short outputs.
    PRECISE_SAMPLING_PARAMS = {
        "temperature": 0.1,
        "max_tokens": 256,
        "skip_special_tokens": True,
        "top_p": 0.8,
        "top_k": 20,
        "repetition_penalty": 1.05,
    }

class AppConfig:
    """Application-level settings: server binding, logging, preset selection."""

    # FastAPI server binding.
    HOST = "0.0.0.0"
    PORT = 8001
    RELOAD = False  # keep False in production

    # Logging verbosity.
    LOG_LEVEL = "INFO"

    # Which ModelConfig preset to use. Valid values:
    #   "memory_optimized", "high_performance", "low_memory",
    #   "multi_gpu", "sliding_window"
    MODEL_CONFIG_TYPE = "sliding_window"  # sliding-window-compatible by default

    # Which SamplingConfig preset to use. Valid values:
    #   "default", "creative", "precise"
    SAMPLING_CONFIG_TYPE = "default"

def get_model_config(config_type: Optional[str] = None) -> Dict[str, Any]:
    """
    Get the model configuration for the given preset name.

    Args:
        config_type: One of "memory_optimized", "high_performance",
            "low_memory", "multi_gpu", "sliding_window". Defaults to
            AppConfig.MODEL_CONFIG_TYPE when None. Unknown values fall
            back to the memory-optimized preset (with a printed warning).

    Returns:
        Dict[str, Any]: A copy of the configuration dict, so callers can
        mutate it without affecting the shared class-level preset.
    """
    if config_type is None:
        config_type = AppConfig.MODEL_CONFIG_TYPE

    config_map = {
        "memory_optimized": ModelConfig.MEMORY_OPTIMIZED_CONFIG,
        "high_performance": ModelConfig.HIGH_PERFORMANCE_CONFIG,
        "low_memory": ModelConfig.LOW_MEMORY_CONFIG,
        "multi_gpu": ModelConfig.MULTI_GPU_CONFIG,
        "sliding_window": ModelConfig.SLIDING_WINDOW_CONFIG,
    }

    # Make the fallback explicit: the original silently used the
    # memory-optimized preset while still printing the requested name.
    if config_type not in config_map:
        print(f"⚠️ 未知配置类型 (Unknown config type): {config_type} -> memory_optimized")
        config_type = "memory_optimized"

    config = config_map[config_type]
    print(f"🔧 使用配置类型 (Using config type): {config_type}")
    print(f"📊 配置详情 (Config details): {config}")

    return config.copy()

def get_sampling_config(config_type: Optional[str] = None) -> Dict[str, Any]:
    """
    Get the sampling configuration for the given preset name.

    Args:
        config_type: One of "default", "creative", "precise". Defaults to
            AppConfig.SAMPLING_CONFIG_TYPE when None. Unknown values fall
            back to the default parameters.

    Returns:
        Dict[str, Any]: A copy of the sampling parameter dict, safe for
        callers to mutate.
    """
    if config_type is None:
        config_type = AppConfig.SAMPLING_CONFIG_TYPE

    config_map = {
        "default": SamplingConfig.DEFAULT_SAMPLING_PARAMS,
        "creative": SamplingConfig.CREATIVE_SAMPLING_PARAMS,
        "precise": SamplingConfig.PRECISE_SAMPLING_PARAMS,
    }

    # .get() provides the silent default fallback; .copy() protects the preset.
    return config_map.get(config_type, SamplingConfig.DEFAULT_SAMPLING_PARAMS).copy()

def get_model_path() -> str:
    """
    Resolve the model path, with "~" expanded to the user's home directory.

    Returns:
        str: The path from the MODEL_PATH environment variable if set,
        otherwise ModelConfig.DEFAULT_MODEL_PATH.
    """
    # The environment variable takes precedence over the built-in default.
    raw_path = os.environ.get("MODEL_PATH", ModelConfig.DEFAULT_MODEL_PATH)
    return os.path.expanduser(raw_path)

def print_config_summary():
    """Print a one-screen summary of the active application configuration."""
    divider = "=" * 60
    lines = [
        divider,
        "🚀 vLLM 模型配置摘要 (vLLM Model Configuration Summary)",
        divider,
        f"📍 模型路径 (Model Path): {get_model_path()}",
        f"🔧 模型配置 (Model Config): {AppConfig.MODEL_CONFIG_TYPE}",
        f"🎯 采样配置 (Sampling Config): {AppConfig.SAMPLING_CONFIG_TYPE}",
        f"🌐 服务地址 (Server Address): {AppConfig.HOST}:{AppConfig.PORT}",
        divider,
    ]
    # One joined print produces the same stdout as the original per-line prints.
    print("\n".join(lines))

# 自测入口 (Self-test entry point: prints summary and sample presets when run directly)
if __name__ == "__main__":
    # Manual smoke test: show the active config, then sample a few presets.
    print_config_summary()

    print("\n🧪 测试不同配置 (Testing different configurations):")
    shown_keys = ("max_num_seqs", "gpu_memory_utilization", "max_model_len")
    for preset in ("memory_optimized", "high_performance", "low_memory"):
        print(f"\n--- {preset.upper()} ---")
        preset_config = get_model_config(preset)
        for key in shown_keys:
            print(f"{key}: {preset_config[key]}")
