# core/model/model_caller.py

import requests
from openai import OpenAI
from core.utils.logging_handler import LoggingHandler

class ModelCaller:
    """Routes a prompt either to a remote OpenAI-compatible API or to a local
    Ollama server, depending on configuration and the caller's choice.

    The remote path is only available when both ``api_key`` and ``base_url``
    were supplied at construction time; otherwise calls fall back to the
    local model.
    """

    def __init__(self, api_key=None, base_url=None, local_model_name=None,
                 remote_model_name=None,
                 local_base_url="http://localhost:11434"):
        """
        Args:
            api_key: API key for the remote OpenAI-compatible service.
            base_url: Base URL of the remote service.
            local_model_name: Model name to pass to the local Ollama server.
            remote_model_name: Model name to pass to the remote service.
            local_base_url: Base URL of the local Ollama server
                (default matches Ollama's standard port).
        """
        self.logger = LoggingHandler().get_logger()
        self.api_key = api_key
        self.base_url = base_url
        self.local_model_name = local_model_name
        self.remote_model_name = remote_model_name
        self.local_base_url = local_base_url.rstrip("/")
        self.client = None

        if api_key and base_url:
            # Initialize the OpenAI client only when both credentials and
            # endpoint are available; self.client stays None otherwise and
            # call_model() will route to the local model.
            self.client = OpenAI(api_key=api_key, base_url=base_url)

    def call_model(self, prompt, use_remote=True):
        """Dispatch *prompt* to the remote model if requested and configured,
        otherwise to the local model. Returns the model's text response.

        Raises:
            ValueError: if the selected backend call fails.
        """
        if use_remote and self.client:
            return self._call_remote_model(prompt)
        else:
            return self._call_local_model(prompt)

    def _call_remote_model(self, prompt):
        """Call the remote model (e.g. Qwen) via the OpenAI-compatible API.

        Raises:
            ValueError: wrapping any failure from the underlying client.
        """
        try:
            self.logger.info("调用远程模型")
            completion = self.client.chat.completions.create(
                model=self.remote_model_name,
                messages=[
                    {'role': 'system', 'content': 'You are a helpful assistant.'},
                    {'role': 'user', 'content': prompt}
                ],
                temperature=0.8
            )
            response = completion.choices[0].message.content.strip()
            self.logger.info(f"远程模型返回的结果: {response}")
            return response
        except Exception as e:
            self.logger.error(f"远程模型调用失败: {e}", exc_info=True)
            # Wrap in ValueError so callers handle one exception type
            # regardless of what the OpenAI client raised.
            raise ValueError(f"Error calling remote model: {e}")

    def _call_local_model(self, prompt):
        """Call the local Ollama server via its non-streaming generate API.

        Raises:
            ValueError: wrapping any HTTP/connection failure.
        """
        try:
            self.logger.info("调用本地模型")
            payload = {
                "model": self.local_model_name,
                "prompt": prompt,
                "stream": False
            }
            # Ollama's non-streaming generation endpoint is /api/generate
            # (plain /api is not a valid route). A timeout prevents a hung
            # server from blocking the caller forever; generation can be
            # slow, so it is generous.
            response = requests.post(
                f"{self.local_base_url}/api/generate",
                json=payload,
                timeout=120,
            )
            response.raise_for_status()
            # Default to "" so a missing "response" key cannot raise an
            # uncaught AttributeError from .strip() on None.
            result = response.json().get("response", "").strip()
            self.logger.info(f"本地模型返回的结果: {result}")
            return result
        except requests.exceptions.RequestException as e:
            self.logger.error(f"本地模型调用失败: {e}", exc_info=True)
            raise ValueError(f"Error calling local model: {e}")
