"""
应用配置文件
"""
import os
from pathlib import Path
from typing import List, Optional

from pydantic_settings import BaseSettings, SettingsConfigDict

class Settings(BaseSettings):
    """Application configuration.

    Every attribute below is a default; pydantic-settings lets any of them
    be overridden by an environment variable of the same name or an entry
    in the local ``.env`` file.
    """

    # pydantic-settings v2 configuration (replaces the deprecated inner
    # ``class Config``): read overrides from ``.env``, match environment
    # variable names case-sensitively.
    model_config = SettingsConfigDict(env_file=".env", case_sensitive=True)

    # Project metadata
    PROJECT_NAME: str = "视频分析 API"
    DESCRIPTION: str = "基于 VideoChat-Flash-Qwen2_5-2B_res448 模型的视频内容分析 API"
    VERSION: str = "1.0.0"
    API_V1_STR: str = "/api/v1"

    # Server configuration
    HOST: str = "0.0.0.0"  # bind on all interfaces
    PORT: int = 8000
    ENVIRONMENT: str = "development"

    # CORS: browser origins allowed to call the API
    BACKEND_CORS_ORIGINS: List[str] = ["http://localhost:3000", "http://127.0.0.1:3000"]

    # File storage. BASE_DIR is three levels above this file — presumably
    # the project root; verify against the repository layout.
    BASE_DIR: Path = Path(__file__).resolve().parent.parent.parent
    UPLOAD_DIR: str = str(BASE_DIR / "uploads")
    TEMP_DIR: str = str(BASE_DIR / "temp")
    MAX_FILE_SIZE: int = 100 * 1024 * 1024  # 100 MB upload cap
    ALLOWED_VIDEO_EXTENSIONS: List[str] = [".mp4", ".avi", ".mov", ".mkv", ".wmv", ".flv"]

    # Model configuration - VideoChat-Flash
    MODEL_ID: str = "OpenGVLab/VideoChat-Flash-Qwen2_5-2B_res448"
    # Alternative checkpoints:
    # MODEL_ID: str = "OpenGVLab/VideoChat-Flash-Qwen2_5-7B_InternVideo2-1B"
    # MODEL_ID: str = "OpenGVLab/VideoChat-Flash-Qwen2-7B_res448"
    MAX_FRAMES: int = 60  # VideoChat-Flash supports a higher frame budget
    MAX_NEW_TOKENS: int = 1024
    # Default computed once at import time; set CUDA_AVAILABLE=true to select
    # the GPU (a TORCH_DEVICE env var still overrides this default).
    TORCH_DEVICE: str = "cuda" if os.environ.get("CUDA_AVAILABLE") == "true" else "cpu"
    TORCH_DTYPE: str = "bfloat16"  # VideoChat-Flash also uses bfloat16

    # VideoChat-Flash specific generation settings
    MM_LLM_COMPRESS: bool = True  # enable compression
    TEMPERATURE: float = 0.0  # generation temperature (0 = deterministic)
    TOP_P: float = 0.1  # top-p (nucleus) sampling
    NUM_BEAMS: int = 1  # beam-search width

    # GPU configuration
    GPU_MEMORY_FRACTION: float = 0.8
    LOW_CPU_MEM_USAGE: bool = True

    # Startup configuration
    PRELOAD_MODEL_ON_STARTUP: bool = True  # preload the model when the app starts
    STARTUP_TIMEOUT: int = 300  # startup timeout in seconds

# Global settings instance shared by the whole application
settings = Settings()