import requests
from typing import List
from multihop.core.docment import  logger


class DeepSeekClient:
    """Thin HTTP client for a local Ollama server running a DeepSeek model.

    Holds a persistent ``requests.Session`` for connection reuse and
    verifies at construction time that the Ollama service is reachable
    and (best-effort) that the requested model is available.
    """

    def __init__(self, base_url: str = "http://localhost:11434", model_name: str = "deepseek:1.5b"):
        """Create the client and immediately probe the Ollama service.

        Args:
            base_url: Root URL of the Ollama HTTP API.
            model_name: Name (or name fragment) of the model used by generate().

        Raises:
            RuntimeError: If the Ollama service cannot be reached.
        """
        self.base_url = base_url
        self.model_name = model_name
        # Session reuses the underlying TCP connection across requests.
        self.session = requests.Session()

        # Fail fast at construction rather than on the first generate() call.
        self._check_service_health()

    def _check_service_health(self) -> None:
        """Check that the Ollama service responds and the model is listed.

        A missing model only logs a warning (the tag list can lag behind a
        pull); an unreachable service raises and aborts construction.

        Raises:
            RuntimeError: If the /api/tags endpoint cannot be reached.
        """
        try:
            response = self.session.get(f"{self.base_url}/api/tags", timeout=5)
            response.raise_for_status()

            # Collect the names of all locally available models.
            models = response.json().get("models", [])
            model_names = [m.get("name", "") for m in models]

            # Substring match so "deepseek:1.5b" also matches tagged variants.
            if not any(self.model_name in name for name in model_names):
                logger.warning(f"模型 {self.model_name} 未找到，可用模型: {model_names}")

        except requests.exceptions.RequestException as e:
            logger.error(f"Ollama服务连接失败: {e}")
            # RuntimeError (an Exception subclass) keeps existing
            # `except Exception` callers working; `from e` preserves the
            # original traceback for debugging.
            raise RuntimeError(f"无法连接到Ollama服务，请确保服务正在运行: {e}") from e

    def generate(self, prompt: str, max_tokens: int = 512, temperature: float = 0.7) -> str:
        """Generate a completion for *prompt* via Ollama's /api/generate.

        Args:
            prompt: Prompt text sent to the model.
            max_tokens: Upper bound on generated tokens (Ollama ``num_predict``).
            temperature: Sampling temperature.

        Returns:
            The generated text (stripped), or "" on any failure — this
            method never raises; all errors are logged instead.
        """
        try:
            payload = {
                "model": self.model_name,
                "prompt": prompt,
                "stream": False,  # single JSON response, not a token stream
                "options": {
                    "num_predict": max_tokens,
                    "temperature": temperature
                }
            }

            response = self.session.post(
                f"{self.base_url}/api/generate",
                json=payload,
                timeout=60  # generous: the first call may trigger a model load
            )

            # Map the most common failure modes to actionable messages before
            # the generic raise_for_status(); the catch-all below logs them.
            if response.status_code == 404:
                raise RuntimeError(f"API端点未找到，请检查Ollama版本和API路径。URL: {response.url}")
            elif response.status_code == 500:
                raise RuntimeError("Ollama服务内部错误，可能是模型加载问题")

            response.raise_for_status()

            result = response.json()
            return result.get("response", "").strip()

        except requests.exceptions.Timeout:
            logger.error("请求超时，模型可能正在加载中")
            return ""
        except requests.exceptions.ConnectionError:
            logger.error("连接失败，请确保Ollama服务正在运行")
            return ""
        except Exception as e:
            # Deliberate catch-all boundary: generate() degrades to "" so
            # callers never have to handle transport/parse errors themselves.
            logger.error(f"模型生成失败: {e}")
            return ""