"""
Ollama API 封装

提供对本地 Ollama 服务的访问接口。
"""

import json
from typing import Any, Dict, Optional, Iterator

import requests

from .base import AIModel, ModelFactory
from ..utils.config import Config


class OllamaModel(AIModel):
    """AI model backed by a locally running Ollama HTTP server.

    Talks to the Ollama REST API (``/api/generate``, ``/api/tags``,
    ``/api/pull``) and translates transport-level failures into
    ``RuntimeError``/``ValueError`` so callers never need to know about
    the ``requests`` library.
    """

    # Request budgets in seconds. Generation can be slow; model pulls slower.
    GENERATE_TIMEOUT = 30
    TAGS_TIMEOUT = 5
    PULL_TIMEOUT = 300

    def __init__(self, config: Config) -> None:
        super().__init__(config)

        # Resolve endpoint and model name from the project configuration.
        base_url = config.get_base_url("ollama")
        model = config.get_model("ollama")

        # Normalize so URL joins below never produce a double slash.
        self.base_url = base_url.rstrip('/')
        self.model = model

        # Default sampling parameters; per-call **kwargs override these.
        self.default_params = {
            "temperature": 0.7,
            "top_p": 0.9,
            "top_k": 40,
            "repeat_penalty": 1.1,
            "num_predict": 2048
        }

    def generate(self, prompt: str, stream: bool = False, **kwargs) -> str:
        """Generate a complete response for *prompt*.

        Args:
            prompt: Input prompt text.
            stream: If True, use the streaming API under the hood but
                still return the fully assembled response.
            **kwargs: Extra generation parameters; override the defaults.

        Returns:
            The generated response text.

        Raises:
            ValueError: If the configured model is not installed (HTTP 404).
            RuntimeError: On connection, timeout, or other API failures.
        """
        try:
            # Merge defaults with per-call overrides.
            params = {**self.default_params, **kwargs}
            if stream:
                # Streaming API, but assembled into one string for the caller.
                return self._generate_stream_complete(prompt, params)
            return self._generate_normal(prompt, params)
        except requests.exceptions.RequestException as e:
            self._raise_api_error(e)
        except Exception as e:
            raise RuntimeError(f"请求失败: {e}")

    def _generate_normal(self, prompt: str, params: Dict[str, Any]) -> str:
        """Generate via the non-streaming API and return the full text."""
        response = self._post_generate(prompt, params, stream=False)
        result = response.json()
        # Ollama returns the whole completion under the "response" key.
        return result.get("response", "未收到有效响应")

    def _generate_stream_complete(self, prompt: str, params: Dict[str, Any]) -> str:
        """Generate via the streaming API but return the assembled text."""
        response = self._post_generate(prompt, params, stream=True)
        # join() avoids the quadratic cost of += accumulation in a loop.
        return "".join(self._iter_fragments(response)) or "未收到有效响应"

    def _post_generate(self, prompt: str, params: Dict[str, Any],
                       stream: bool) -> requests.Response:
        """POST to ``/api/generate`` and return the status-checked response."""
        data = {
            "model": self.model,
            "prompt": prompt,
            "stream": stream,
            **params
        }
        response = requests.post(
            f"{self.base_url}/api/generate",
            json=data,
            timeout=self.GENERATE_TIMEOUT,
            stream=stream
        )
        response.raise_for_status()
        return response

    @staticmethod
    def _iter_fragments(response: requests.Response) -> Iterator[str]:
        """Yield text fragments from a streaming ``/api/generate`` response.

        Stops at the first chunk whose ``done`` flag is set; malformed
        lines are skipped rather than aborting the whole stream.
        """
        for raw_line in response.iter_lines():
            if not raw_line:
                continue
            try:
                payload = json.loads(raw_line.decode('utf-8'))
            except json.JSONDecodeError:
                continue
            if payload.get("response"):
                yield payload["response"]
            if payload.get("done", False):
                break

    def generate_stream(self, prompt: str, **kwargs) -> Iterator[str]:
        """Stream a response as an iterator of text fragments.

        Args:
            prompt: Input prompt text.
            **kwargs: Extra generation parameters; override the defaults.

        Yields:
            Response text fragments in arrival order.

        Raises:
            ValueError: If the configured model is not installed (HTTP 404).
            RuntimeError: On connection, timeout, or other API failures.
        """
        try:
            params = {**self.default_params, **kwargs}
            response = self._post_generate(prompt, params, stream=True)
            yield from self._iter_fragments(response)
        except requests.exceptions.RequestException as e:
            self._raise_api_error(e)
        except Exception as e:
            raise RuntimeError(f"请求失败: {e}")

    def _raise_api_error(self, exc: Exception):
        """Translate a low-level request failure into a caller-facing error.

        Always raises; never returns. Check order matters: connection
        timeouts subclass both ConnectionError and Timeout, and the
        connection message is the more useful one.
        """
        if isinstance(exc, requests.exceptions.ConnectionError):
            raise RuntimeError(f"无法连接到Ollama服务: {self.base_url}")
        if isinstance(exc, requests.exceptions.Timeout):
            raise RuntimeError("请求超时，请检查Ollama服务状态")
        if isinstance(exc, requests.exceptions.HTTPError):
            if exc.response.status_code == 404:
                raise ValueError(f"模型 '{self.model}' 不存在，请先下载该模型")
            raise RuntimeError(f"Ollama API错误: {exc}")
        raise RuntimeError(f"请求失败: {exc}")

    def is_available(self) -> bool:
        """Return True if the service is reachable and the model is installed.

        Returns:
            Whether the configured model can be used right now.
        """
        # get_available_models() returns [] on any failure, which also
        # covers the "service unreachable" case.
        return self.model in self.get_available_models()

    def get_available_models(self) -> list:
        """Return the names of locally installed models.

        Returns:
            List of model name strings; empty on any failure.
        """
        try:
            response = requests.get(
                f"{self.base_url}/api/tags",
                timeout=self.TAGS_TIMEOUT
            )
            response.raise_for_status()
            models = response.json().get("models", [])
            return [model["name"] for model in models]
        except Exception:
            # Best-effort probe: any failure simply means "nothing visible".
            return []

    def pull_model(self, model_name: Optional[str] = None) -> bool:
        """Download a model to the local Ollama instance.

        Args:
            model_name: Model to pull; defaults to the configured model.

        Returns:
            True on success, False on any failure.
        """
        model = model_name or self.model
        try:
            response = requests.post(
                f"{self.base_url}/api/pull",
                json={"name": model},
                timeout=self.PULL_TIMEOUT  # downloads may take a long time
            )
            response.raise_for_status()
            return True
        except Exception:
            # Best-effort: report failure instead of raising.
            return False


# Register this backend with the model factory so it can be created by name.
ModelFactory.register("ollama", OllamaModel)
