"""
Ollama提供商
支持访问本地运行的Ollama模型
"""
import json
import os
import time
import logging
import subprocess
from typing import Dict, List, Optional, Any, Generator
import requests

from ..core.provider import LocalProvider
from ..core.exceptions import map_provider_error
from ..implementations.ollama.streaming import OllamaStreaming
from ..core.provider_registry import register_provider

# 设置日志
logger = logging.getLogger("llm.ollama")


@register_provider("ollama")
class OllamaProvider(LocalProvider):
    """Ollama provider for accessing locally running Ollama models."""

    # Timeout (seconds) for the blocking chat REST call. Prevents a hung
    # Ollama daemon from blocking the caller forever (the version probe
    # below already uses a timeout; the chat call previously did not).
    REQUEST_TIMEOUT = 600

    def __init__(self, config: Dict[str, Any]):
        """
        Initialize the Ollama provider.

        Args:
            config: Configuration dict. Recognized keys: "host" (base URL of
                the Ollama daemon), "keep_alive" (seconds to keep the model
                loaded), "model" (model tag to request).
        """
        # These attributes must exist before super().__init__ runs, because
        # the parent is expected to invoke _validate_config, which reads
        # self.host.
        self.host = config.get("host", "http://localhost:11434")
        self.keep_alive = config.get("keep_alive", 9999)  # seconds to keep the model loaded
        self.use_library = False

        # Parent init; presumably also sets self.temperature and
        # self.max_tokens used below -- they are not assigned here.
        super().__init__(config)

        self.model = config.get("model", "llama3.3:70b-instruct-q4_K_M")

        # Decide whether the python "ollama" library or the REST API is used.
        self._check_ollama_installed()

    def _validate_config(self, config: Dict[str, Any]) -> None:
        """
        Validate the configuration.

        A local provider needs no API key; instead the Ollama service is
        probed for availability. Failures are only logged as warnings so
        the provider can still be constructed while the daemon is down.

        Args:
            config: Configuration dict (host was already derived from it
                in __init__).
        """
        try:
            response = requests.get(f"{self.host}/api/version", timeout=3)
            if response.status_code == 200:
                logger.info(f"Ollama服务可用，版本: {response.json().get('version')}")
            else:
                logger.warning(f"Ollama服务响应异常: {response.status_code}")
        except Exception as e:
            logger.warning(f"无法连接到Ollama服务: {str(e)}")

    def _init_capabilities(self) -> None:
        """Register the capability components supported by Ollama."""
        logger.debug("初始化Ollama功能组件")

        # Streaming output support.
        self.register_capability("streaming", OllamaStreaming)

    def _check_ollama_installed(self) -> None:
        """Detect whether the python "ollama" library is importable.

        Sets self.use_library; when the library is missing the provider
        falls back to the REST API.
        """
        try:
            import ollama  # noqa: F401 -- availability probe only
            self.use_library = True
            logger.info("使用ollama库")
        except ImportError:
            self.use_library = False
            logger.info("未安装ollama库，将使用REST API")

    def complete(self, messages: List[Dict[str, str]], stream: bool = False) -> str:
        """
        Get a completion from the model.

        Args:
            messages: Chat messages (role/content dicts).
            stream: If True, consume the streaming endpoint internally and
                return the concatenated text.

        Returns:
            The completion text.
        """
        if stream:
            # join() accepts the generator directly; no intermediate list.
            return "".join(self.complete_stream(messages))

        return self._call_with_retry(self._complete_internal, messages)

    def _complete_internal(self, messages: List[Dict[str, Any]]) -> str:
        """
        Perform a single non-streaming completion request.

        Args:
            messages: Chat messages (role/content dicts).

        Returns:
            The completion text.

        Raises:
            The standard LLM exception produced by map_provider_error for
            any underlying failure.
        """
        start_time = time.time()

        try:
            if self.use_library:
                result = self._complete_via_library(messages)
            else:
                result = self._complete_via_rest(messages)

            elapsed = time.time() - start_time
            logger.info(f"Ollama请求完成，耗时: {elapsed:.2f}秒，模型: {self.model}")

            return result
        except Exception as e:
            elapsed = time.time() - start_time
            logger.error(f"Ollama请求失败，耗时: {elapsed:.2f}秒，错误: {str(e)}")

            # Normalize to the project's standard LLM exception hierarchy.
            raise map_provider_error("ollama", e, self.model)

    def _complete_via_library(self, messages: List[Dict[str, Any]]) -> str:
        """Issue the chat request through the python "ollama" library."""
        import ollama
        response = ollama.chat(
            model=self.model,
            messages=messages,
            keep_alive=self.keep_alive,
            stream=False,
            options={
                "temperature": self.temperature,
                # NOTE(review): num_ctx is the context-window size; passing
                # max_tokens here mirrors the original behavior -- confirm
                # intent (num_predict is the output-length limit).
                "num_ctx": self.max_tokens
            }
        )
        result = response["message"]["content"]
        self._log_throughput(response)
        return result

    def _log_throughput(self, response: Dict[str, Any]) -> None:
        """Log token-throughput metrics when the response includes them."""
        if "prompt_eval_count" not in response:
            return

        prompt_tokens = response["prompt_eval_count"]
        completion_tokens = response["eval_count"]
        prompt_eval_duration = response["prompt_eval_duration"]
        eval_duration = response["eval_duration"]

        # Durations are reported in nanoseconds. Guard against zero to
        # avoid ZeroDivisionError on instantaneous (e.g. fully cached)
        # evaluations.
        s2ns = 1_000_000_000  # seconds -> nanoseconds
        if prompt_eval_duration:
            logger.info(f"输入: {prompt_tokens}t/{prompt_eval_duration/s2ns:.1f}s {s2ns*prompt_tokens/prompt_eval_duration:.1f}t/s")
        if eval_duration:
            logger.info(f"输出: {completion_tokens}t/{eval_duration/s2ns:.1f}s {s2ns*completion_tokens/eval_duration:.1f}t/s")

    def _complete_via_rest(self, messages: List[Dict[str, Any]]) -> str:
        """Issue the chat request against the Ollama REST /api/chat endpoint."""
        response = requests.post(
            f"{self.host}/api/chat",
            json={
                "model": self.model,
                "messages": messages,
                "keep_alive": self.keep_alive,
                "stream": False,
                "options": {
                    "temperature": self.temperature,
                    "num_ctx": self.max_tokens
                }
            },
            # Bug fix: the original call had no timeout and could hang
            # indefinitely if the daemon stopped responding.
            timeout=self.REQUEST_TIMEOUT
        )

        if response.status_code != 200:
            raise Exception(f"API请求失败: {response.status_code} {response.text}")

        return response.json()["message"]["content"]

    def complete_stream(self, messages: List[Dict[str, str]]) -> Generator[str, None, None]:
        """
        Get a streaming completion from the model.

        Args:
            messages: Chat messages (role/content dicts).

        Yields:
            Text chunks as they arrive.
        """
        if not self.has_capability("streaming"):
            # No streaming component registered: fall back to a blocking
            # completion and emit it character by character.
            content = self._complete_internal(messages)
            yield from content
            return

        # Delegate to the registered streaming capability component.
        streaming = self.get_capability("streaming")
        yield from streaming.stream_response(messages)
