#!/usr/bin/env python3
"""
vLLM 翻译服务主程序 (vLLM Translation Service Main Program)
仅负责启动app和绑定路由 (Only responsible for starting app and binding routes)
"""

import asyncio
import argparse
import logging
import os
import re
import sys
import time
from contextlib import asynccontextmanager
from pathlib import Path
from typing import Optional

# vLLM imports
from vllm import LLM

# FastAPI imports
try:
    import uvicorn
    from fastapi import FastAPI, HTTPException
    from fastapi.middleware.cors import CORSMiddleware
    from pydantic import BaseModel, Field
    FASTAPI_AVAILABLE = True
except ImportError:
    FASTAPI_AVAILABLE = False
    print("⚠️  FastAPI not available. Only CLI mode will work.")

# Config import
from config import (
    AppConfig,
    get_model_config,
    get_model_path,
    print_config_summary
)

# Translation API import
if FASTAPI_AVAILABLE:
    from translate import router as translate_router, set_model


def is_valid_hf_model_id(model_id):
    """Return True if *model_id* looks like a Hugging Face repository ID.

    Valid shape is ``namespace/model-name`` where each segment starts and
    ends with an alphanumeric character and may contain ``.``, ``_`` or
    ``-`` in between.

    Uses ``re.fullmatch`` instead of ``re.match`` with a ``$`` anchor:
    ``$`` also matches just before a trailing newline, so the old form
    wrongly accepted IDs like ``"user/model\\n"``.
    """
    # One path segment: alnum at both ends, dots/underscores/dashes inside.
    segment = r'[a-zA-Z0-9]([a-zA-Z0-9._-]*[a-zA-Z0-9])?'
    return re.fullmatch(f'{segment}/{segment}', model_id) is not None


def validate_local_model_path(path):
    """Check that *path* is a directory holding a recognized model config.

    Returns a ``(is_valid, config_type, error_message)`` tuple where
    ``config_type`` is ``"huggingface"`` or ``"mistral"`` on success and
    ``error_message`` describes the failure otherwise.
    """
    model_dir = Path(os.path.expanduser(path))

    # Guard clauses: the path must exist and be a directory.
    if not model_dir.exists():
        return False, None, f"Path does not exist: {model_dir}"
    if not model_dir.is_dir():
        return False, None, f"Path is not a directory: {model_dir}"

    # Probe the recognized config files in priority order:
    # HuggingFace's config.json first, then Mistral's params.json.
    recognized = (
        ("config.json", "huggingface"),
        ("params.json", "mistral"),
    )
    for filename, config_type in recognized:
        if (model_dir / filename).exists():
            return True, config_type, None

    return False, None, f"No recognized configuration file found. Expected 'config.json' (HuggingFace) or 'params.json' (Mistral) in: {model_dir}"


def validate_and_resolve_model_path(model_path_or_id):
    """Validate and resolve a model path or Hugging Face model ID.

    Supports both local filesystem paths and Hugging Face model IDs.
    Returns a ``(resolved_path, is_local, error_message)`` tuple; exactly
    one of ``resolved_path`` / ``error_message`` is None.
    """
    # Treat the argument as a filesystem path when it has two or more
    # slashes, or starts like a path (~, . or an absolute /).  A string
    # with exactly one slash and no path-like prefix is an HF ID candidate.
    looks_like_path = (
        model_path_or_id.count('/') >= 2
        or model_path_or_id.startswith(('~', '.', '/'))
    )

    if looks_like_path:
        is_valid, config_type, error_msg = validate_local_model_path(model_path_or_id)
        if not is_valid:
            return None, True, error_msg
        print(f"✓ 检测到 {config_type} 模型配置 (Detected {config_type} model configuration)")
        return os.path.expanduser(model_path_or_id), True, None

    if is_valid_hf_model_id(model_path_or_id):
        # Hugging Face model ID — vLLM handles the download itself.
        print("✓ 检测到 Hugging Face 模型 ID (Detected Hugging Face model ID)")
        return model_path_or_id, False, None

    # Neither a usable local path nor a well-formed HF repository ID.
    return None, False, f"无效的模型路径或 ID (Invalid model path or ID): '{model_path_or_id}'. 请提供以下之一 (Please provide either):\n1. 有效的 Hugging Face 仓库 ID（格式：username/model-name）(A valid Hugging Face repository ID (format: username/model-name))\n2. 包含 'config.json'（HuggingFace）或 'params.json'（Mistral）的本地目录路径 (A local directory path containing 'config.json' (HuggingFace) or 'params.json' (Mistral))"


# ==================== FastAPI Related Code ====================

# Module-level shared state, written by load_model_with_validation below.
model: Optional[LLM] = None          # the loaded vLLM engine, or None until load succeeds
model_load_time: Optional[float] = None  # wall-clock seconds of the last successful load


def load_model_with_validation(model_path_or_id, **kwargs):
    """Load a vLLM model after validating the path/ID.

    Updates the module-level ``model`` and ``model_load_time`` globals.
    Extra keyword arguments are forwarded to the ``LLM`` constructor.
    Returns the loaded ``LLM`` instance, or ``None`` on any failure.
    """
    global model, model_load_time

    # Resolve the argument to either a local path or an HF repo ID.
    resolved_path, is_local, error_message = validate_and_resolve_model_path(model_path_or_id)
    if error_message:
        print(f"❌ 错误 (Error): {error_message}")
        return None

    locality_zh = '本地' if is_local else '远程'
    locality_en = 'local' if is_local else 'remote'
    print(f"🔄 加载{locality_zh}模型 (Loading {locality_en} model): {resolved_path}")

    try:
        started_at = time.time()
        model = LLM(model=resolved_path, **kwargs)
        model_load_time = time.time() - started_at
    except Exception as exc:
        print(f"❌ 模型加载失败 (Failed to load model): {exc}")
        print("\n📋 请验证以下要求 (Please verify the following requirements):")
        print("1. 提供有效的 Hugging Face 仓库 ID (Provide a valid Hugging Face repository ID).")
        print("2. 指定包含识别配置文件的本地目录 (Specify a local directory that contains a recognized configuration file).")
        print("   - 对于 Hugging Face 模型：确保存在 'config.json' (For Hugging Face models: ensure the presence of a 'config.json').")
        print("   - 对于 Mistral 模型：确保存在 'params.json' (For Mistral models: ensure the presence of a 'params.json').")
        return None
    else:
        print(f"✅ 模型加载成功！耗时 {model_load_time:.2f} 秒 (Model loaded successfully! Time taken: {model_load_time:.2f} seconds)")
        return model


async def load_model_async():
    """Load the model and hand it to the translation router.

    Runs at application startup.  Raises ``ValueError`` when loading
    fails; any exception is logged and re-raised so startup aborts.

    Note: the original declared ``global model, model_load_time`` here,
    but this function never assigns either name — the globals are updated
    inside ``load_model_with_validation`` — so the dead declaration is
    removed.
    """
    try:
        logging.info("🚀 开始加载模型... (Starting to load model...)")

        # Resolve the configured model location and its vLLM kwargs.
        model_path = get_model_path()
        model_config = get_model_config()

        loaded_model = load_model_with_validation(model_path, **model_config)
        if loaded_model is None:
            raise ValueError("模型加载失败 (Model loading failed)")

        # Hand the shared instance to the translation endpoints.
        if FASTAPI_AVAILABLE:
            set_model(loaded_model)

        logging.info("🎉 模型加载完成 (Model loading completed)")

    except Exception as e:
        logging.error(f"❌ 模型加载失败 (Model loading failed): {str(e)}")
        raise


if FASTAPI_AVAILABLE:
    @asynccontextmanager
    async def lifespan(app: FastAPI):
        """Application lifecycle: load the model on startup, drop it on shutdown."""
        # ---- startup ----
        print_config_summary()
        logging.info("🔄 正在启动应用... (Starting application...)")
        try:
            await load_model_async()
        except Exception as e:
            logging.error(f"❌ 应用启动失败 (Application startup failed): {str(e)}")
            raise
        else:
            logging.info("✅ 应用启动完成 (Application startup completed)")

        yield

        # ---- shutdown ----
        logging.info("🔄 正在关闭应用... (Shutting down application...)")
        global model
        if model:
            # Release the model so GPU/host memory can be reclaimed.
            del model
            model = None
        logging.info("✅ 应用关闭完成 (Application shutdown completed)")


    def create_app() -> FastAPI:
        """Build and fully configure the FastAPI application instance."""
        application = FastAPI(
            title="vLLM 翻译服务 (vLLM Translation Service)",
            description="基于vLLM的高性能翻译服务 (High-performance translation service based on vLLM)",
            version="1.0.0",
            lifespan=lifespan,
        )

        # Wide-open CORS so browser clients anywhere can reach the API.
        application.add_middleware(
            CORSMiddleware,
            allow_origins=["*"],
            allow_credentials=True,
            allow_methods=["*"],
            allow_headers=["*"],
        )

        # Translation endpoints, then the basic service endpoints below.
        application.include_router(translate_router)
        add_basic_routes(application)

        return application


    def add_basic_routes(app: FastAPI):
        """Register the root, health-check and model-status endpoints on *app*."""

        @app.get("/")
        async def root():
            """Service banner with a coarse health summary."""
            return {
                "message": "vLLM 翻译服务正在运行 (vLLM Translation Service is running)",
                "status": "healthy",
                "model_loaded": model is not None,
                "service_type": "translation",
            }

        @app.get("/health")
        async def health_check():
            """Liveness probe: reports whether the model has been loaded."""
            loaded = model is not None
            return {
                "status": "healthy" if loaded else "model_not_loaded",
                "timestamp": time.time(),
                "model_loaded": loaded,
                "service": "translation",
            }

        @app.get("/status")
        async def get_model_status():
            """Detailed model status, including GPU memory usage when available."""
            try:
                import torch
                cuda_ok = torch.cuda.is_available()
                gib = 1024 ** 3  # bytes per GiB
                memory_info = {
                    "gpu_memory_allocated": torch.cuda.memory_allocated() / gib if cuda_ok else 0,
                    "gpu_memory_reserved": torch.cuda.memory_reserved() / gib if cuda_ok else 0,
                    "gpu_memory_total": torch.cuda.get_device_properties(0).total_memory / gib if cuda_ok else 0,
                }
            except Exception:
                # torch missing or CUDA query failed — report nothing.
                memory_info = {}

            return {
                "is_loaded": model is not None,
                "model_path": get_model_path(),
                "load_time": model_load_time,
                "config_type": AppConfig.MODEL_CONFIG_TYPE,
                "memory_usage": memory_info,
                "service_type": "translation",
            }





def run_server_mode(config_type: str = "memory_optimized", host: str = "0.0.0.0", port: int = 8000):
    """Configure the application and serve it with uvicorn (blocks until shutdown).

    Exits the process with status 1 when FastAPI is unavailable or the
    server fails to start.
    """
    if not FASTAPI_AVAILABLE:
        print("❌ FastAPI 不可用，无法启动服务器模式 (FastAPI not available, cannot start server mode)")
        print("请安装 FastAPI: pip install fastapi uvicorn (Please install FastAPI: pip install fastapi uvicorn)")
        sys.exit(1)

    print("🌐 服务器模式 - FastAPI vLLM 服务 (Server Mode - FastAPI vLLM Service)")
    print("=" * 60)

    # Publish the runtime settings on the shared config object.
    AppConfig.MODEL_CONFIG_TYPE = config_type
    AppConfig.HOST = host
    AppConfig.PORT = port

    logging.basicConfig(level=getattr(logging, AppConfig.LOG_LEVEL))

    app = create_app()

    print("\n🚀 启动服务器... (Starting server...)")
    print(f"📍 访问地址 (Access URL): http://{host}:{port}")
    print(f"📖 API文档 (API Documentation): http://{host}:{port}/docs")
    print(f"🔧 配置类型 (Configuration type): {config_type}")

    # Hand control to uvicorn; returns only on interrupt or failure.
    server_kwargs = dict(host=host, port=port, log_level=AppConfig.LOG_LEVEL.lower())
    try:
        uvicorn.run(app, **server_kwargs)
    except KeyboardInterrupt:
        print("\n👋 服务器已停止 (Server stopped)")
    except Exception as err:
        print(f"\n❌ 服务器启动失败 (Server startup failed): {err}")
        sys.exit(1)


def main():
    """CLI entry point: parse arguments and launch the translation server."""
    parser = argparse.ArgumentParser(description="vLLM 翻译服务 (vLLM Translation Service)")
    parser.add_argument(
        "--config",
        choices=["memory_optimized", "high_performance", "low_memory", "multi_gpu", "sliding_window"],
        default="sliding_window",
        help="模型配置类型 (Model configuration type)",
    )
    parser.add_argument("--host", default="0.0.0.0", help="服务器主机地址 (Server host address)")
    parser.add_argument("--port", type=int, default=8001, help="服务器端口 (Server port)")
    parser.add_argument("--model-path", help="模型路径或ID (Model path or ID)")
    parser.add_argument(
        "--log-level",
        choices=["DEBUG", "INFO", "WARNING", "ERROR"],
        default="INFO",
        help="日志级别 (Log level)",
    )
    args = parser.parse_args()

    AppConfig.LOG_LEVEL = args.log_level

    # An explicit --model-path overrides the configured default via env var.
    if args.model_path:
        os.environ["MODEL_PATH"] = args.model_path

    run_server_mode(args.config, args.host, args.port)


# Script entry point: only run the CLI when executed directly.
if __name__ == "__main__":
    main()
