
import yaml
from pathlib import Path
from langchain_openai import ChatOpenAI
from langchain_community.chat_models import ChatZhipuAI
import langchain

# Work around a global-attribute incompatibility across langchain versions
# (newer langchain-core, e.g. 0.3.74, no longer defines these module-level
# flags, but some code still reads them): backfill any that are missing.
for _attr, _default in (("verbose", False), ("debug", False), ("llm_cache", None)):
    if not hasattr(langchain, _attr):
        setattr(langchain, _attr, _default)

class LLMFactory:
    """Global LLM factory (supports OpenAI and ZhipuAI, switched via config file).

    Implemented as a process-wide singleton: every construction returns the
    same instance, and configuration is loaded only on the first call.
    """
    _instance = None

    def __new__(cls, *args, **kwargs):
        # Classic singleton: create the instance once, reuse it afterwards.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self, config_path=None):
        """Initialize the factory once.

        Args:
            config_path: Optional explicit path to a YAML config file. When
                omitted, ``config.yaml`` is searched next to this module
                first, then in the current working directory.

        Raises:
            FileNotFoundError: If no config file can be found.
            ValueError: If the config file is empty or lacks an ``llm`` section.
        """
        # Guard clause: repeated constructions must not reload the config
        # or rebuild the LLM (singleton __init__ runs on every call).
        if hasattr(self, "_initialized"):
            return

        # 1. Resolve the config file location.
        if config_path:
            path = Path(config_path)
        else:
            # First look in this module's package directory ...
            path = Path(__file__).parent / "config.yaml"
            if not path.exists():
                # ... then fall back to the directory the script runs from.
                path = Path.cwd() / "config.yaml"

        # 2. Load and validate the configuration (_load_config raises a
        #    FileNotFoundError itself, so no separate existence check needed).
        self.config = self._load_config(path)

        # 3. Build the chat model from the config.
        self.llm = self._create_llm()

        # 4. Mark initialization complete so later __init__ calls are no-ops.
        self._initialized = True

    def _load_config(self, path):
        """Read the YAML config at *path* and validate its basic shape.

        Returns:
            The parsed configuration dict.

        Raises:
            FileNotFoundError: If *path* does not exist.
            ValueError: If the file is empty, not a mapping, or has no
                ``llm`` section — fail early with a clear message instead of
                a confusing TypeError/KeyError later in ``_create_llm``.
        """
        config_file = Path(path)
        if not config_file.exists():
            raise FileNotFoundError(f"未找到配置文件: {path}")
        with open(config_file, "r", encoding="utf-8") as f:
            config = yaml.safe_load(f)
        # safe_load returns None for an empty file; also reject non-mapping
        # documents and configs missing the required "llm" section.
        if not isinstance(config, dict) or "llm" not in config:
            raise ValueError(f"配置文件缺少 'llm' 配置段: {path}")
        return config

    def _create_llm(self):
        """Instantiate the chat model selected by ``config['llm']``.

        Raises:
            ValueError: If ``provider`` is neither ``zhipu`` nor ``openai``.
        """
        cfg = self.config["llm"]
        provider = cfg.get("provider", "zhipu")
        api_key = cfg.get("api_key")
        model = cfg.get("model")
        temperature = cfg.get("temperature", 0.6)

        if provider == "zhipu":
            return ChatZhipuAI(
                temperature=temperature,
                model=model or "glm-4-flash",
                zhipuai_api_key=api_key,
                streaming=True,   # enable streaming responses
            )
        elif provider == "openai":
            return ChatOpenAI(
                temperature=temperature,
                model=model or "gpt-4o-mini",
                openai_api_key=api_key,
                streaming=True,   # enable streaming responses
            )
        else:
            raise ValueError(f"不支持的 provider: {provider}")

    def get_llm(self):
        """Return the shared chat-model instance."""
        return self.llm

    def stream(self, prompt: str):
        """Stream the model's response for *prompt* to stdout, chunk by chunk."""
        for chunk in self.llm.stream(prompt):
            print(chunk.content, end="", flush=True)
        print()  # trailing newline after the streamed output
