from llm.provider_factory import ProviderFactory

class ModelService:
    """Single entry point that routes model calls to the configured provider."""

    @staticmethod
    def call(provider_name: str, model_name: str, mode: str, stream: bool = False, **kwargs):
        """
        Unified model invocation interface.

        :param provider_name: provider key from configuration, e.g. "openai_official"
        :param model_name: model identifier, e.g. "gpt-4o-mini"
        :param mode: "chat" or "embed"
        :param stream: whether to stream the response; only meaningful in chat mode
        :param kwargs: provider-specific arguments, e.g. ``prompt`` or ``text``
        :raises ValueError: if a required argument is missing or *mode* is unknown
        """
        provider = ProviderFactory.get_provider(provider_name)

        if mode == "chat":
            prompt = kwargs.get("prompt")
            if not prompt:
                raise ValueError("chat 模式需要 prompt 参数")
            # When streaming, the provider hands back a generator that the
            # caller iterates on its own.
            invoke = provider.chat_stream if stream else provider.chat
            return invoke(model_name, prompt)

        if mode == "embed":
            text = kwargs.get("text")
            if not text:
                raise ValueError("embed 模式需要 text 参数")
            return provider.embed(model_name, text)

        raise ValueError(f"未知调用方式: {mode}")
