from pydantic import BaseModel
import torch
import os
from typing import Optional, Dict


class AppConfig(BaseModel):
    """Server-level settings: bind address, port, and upstream service URLs."""

    host: str = "0.0.0.0"  # bind address; 0.0.0.0 listens on all interfaces
    port: int = 9115
    log_level: str = "info"
    timeout_keep_alive: int = 300  # keep-alive timeout — presumably seconds; confirm against the server framework
    base_url: str = "http://localhost:9115"  # externally visible URL of this service
    ollama_host: str = "http://localhost:11434"  # Ollama endpoint (also declared on ModelConfig)

class ModelConfig(BaseModel):
    """Default model settings for WhisperX/Whisper (ASR), Ollama (LLM) and ChatTTS (TTS).

    Also carries per-request optional override fields (``text``, ``speaker``,
    ``language``, ...) that default to ``None`` when not supplied.
    """

    # WhisperX configuration
    whisperx_model: str = "base"
    whisperx_device: str = "cuda"
    whisperx_model_dir: str = r"G:\PythonSoft\huggingface_models\whisperx"  # local model path

    # WhisperX currently has issues; plain whisper is used as a stand-in for now.
    whisper_model: str = "base"
    whisper_device: str = "cuda"
    whisper_model_dir: str = r"G:\PythonSoft\huggingface_models\whisperx"  # local model path (shares the whisperx dir)

    # Ollama configuration
    ollama_host: str = "http://localhost:11434"
    ollama_model: str = "llama3.2-vision"
    ollama_chat_model: str = "deepseek-r1:7b"

    # ChatTTS configuration
    chattts_speaker: str = "female_01"
    chattts_sample_rate: int = 24000
    chattts_model_dir: str = r"G:\PythonSoft\huggingface_models\chattts"  # ChatTTS local model path

    # Generic inference defaults
    # NOTE(review): "model_name" may trigger pydantic v2's protected-namespace
    # warning ("model_" prefix) — confirm against the pydantic version in use.
    model: str = "ollama"
    model_name: str = "qwen2.5:7b"
    temperature: float = 0.7
    max_tokens: int = 512
    top_p: float = 0.9
    stream: bool = False

    # Per-request optional overrides.
    # Bug fix: ``text`` was declared twice (identical declarations); the
    # duplicate has been removed — behavior and field order are unchanged.
    text: Optional[str] = None  # used by chattts
    sample_rate: int = 24000  # used by chattts
    speaker: Optional[str] = None  # used by chattts
    speed: Optional[float] = None  # used by chattts
    language: Optional[str] = None  # used by whisper
    batch_size: Optional[int] = None  # used by whisper
    align_output: Optional[bool] = None  # used by whisper
    custom_alignment_model: Optional[str] = None  # used by whisper



# Module-level singleton config instances, shared by importers of this module.
APP_CONFIG = AppConfig()
MODEL_CONFIG = ModelConfig()

# Temporary directory for file handling
TEMP_DIR = "temp_files"
os.makedirs(TEMP_DIR, exist_ok=True)  # side effect at import time; no-op if the directory exists
