"""
Ollama LLM客户端
"""
import json
import logging
from typing import Any, AsyncGenerator, Dict, List, Optional

import httpx

from app.core.config import settings

logger = logging.getLogger(__name__)


class OllamaClient:
    """Async client for the Ollama HTTP API (generation, streaming, embeddings)."""

    def __init__(self):
        # Connection and model defaults all come from application settings.
        self.base_url = settings.OLLAMA_BASE_URL
        self.llm_model = settings.OLLAMA_LLM_MODEL
        self.embed_model = settings.OLLAMA_EMBED_MODEL
        self.temperature = settings.OLLAMA_TEMPERATURE
        self.max_tokens = settings.OLLAMA_MAX_TOKENS

    def _options(
        self,
        temperature: Optional[float],
        max_tokens: Optional[int],
    ) -> Dict[str, Any]:
        """
        Build the Ollama "options" payload, falling back to configured defaults.

        Uses `is not None` rather than `or` so that explicit falsy overrides
        (e.g. temperature=0.0 for deterministic output, max_tokens=0) are
        honored instead of being silently replaced by the defaults.
        """
        return {
            "temperature": temperature if temperature is not None else self.temperature,
            "num_predict": max_tokens if max_tokens is not None else self.max_tokens,
        }

    async def generate_embedding(self, text: str) -> List[float]:
        """
        Generate an embedding vector for the given text.

        Args:
            text: Input text.

        Returns:
            The embedding as a list of floats.

        Raises:
            ValueError: If Ollama returns an empty/missing embedding.
            httpx.HTTPError: On connection failure or a non-2xx response.
        """
        try:
            async with httpx.AsyncClient(timeout=60.0) as client:
                # Ollama's embeddings endpoint takes "prompt" (not "input")
                # and responds with {"embedding": [...]} (singular).
                response = await client.post(
                    f"{self.base_url}/api/embeddings",
                    json={
                        "model": self.embed_model,
                        "prompt": text,
                    },
                )
                response.raise_for_status()
                embedding = response.json().get("embedding")
                if not embedding:  # missing key, None, or empty list
                    raise ValueError("Ollama 返回了空的 embedding")
                return embedding
        except Exception as e:
            logger.error(f"生成向量失败: {str(e)}")
            logger.error(f"Ollama 地址: {self.base_url}, 模型: {self.embed_model}")
            raise

    async def chat_completion(
        self,
        messages: List[Dict[str, str]],
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
    ) -> str:
        """
        Chat completion (non-streaming).

        Args:
            messages: Message list [{"role": "system/user/assistant", "content": "..."}].
            temperature: Sampling temperature; defaults to the configured value.
            max_tokens: Maximum tokens to generate; defaults to the configured value.

        Returns:
            The generated reply text ("" if the response carries none).

        Raises:
            httpx.HTTPError: On connection failure or a non-2xx response.
        """
        try:
            # The /api/generate endpoint takes a flat prompt, so the chat
            # history is flattened into one string first.
            prompt = self._build_prompt(messages)

            async with httpx.AsyncClient(timeout=60.0) as client:
                response = await client.post(
                    f"{self.base_url}/api/generate",
                    json={
                        "model": self.llm_model,
                        "prompt": prompt,
                        "stream": False,
                        "options": self._options(temperature, max_tokens),
                    },
                )
                response.raise_for_status()
                return response.json().get("response", "")
        except Exception as e:
            logger.error(f"聊天补全失败: {str(e)}")
            raise

    async def chat_completion_stream(
        self,
        messages: List[Dict[str, str]],
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
    ) -> AsyncGenerator[str, None]:
        """
        Chat completion (streaming).

        Args:
            messages: Message list [{"role": ..., "content": ...}].
            temperature: Sampling temperature; defaults to the configured value.
            max_tokens: Maximum tokens to generate; defaults to the configured value.

        Yields:
            Generated text fragments, one per streamed JSON line.

        Raises:
            httpx.HTTPError: On connection failure or a non-2xx response.
        """
        try:
            prompt = self._build_prompt(messages)

            async with httpx.AsyncClient(timeout=120.0) as client:
                async with client.stream(
                    "POST",
                    f"{self.base_url}/api/generate",
                    json={
                        "model": self.llm_model,
                        "prompt": prompt,
                        "stream": True,
                        "options": self._options(temperature, max_tokens),
                    },
                ) as response:
                    response.raise_for_status()
                    # Ollama streams one JSON object per line.
                    async for line in response.aiter_lines():
                        if not line:
                            continue
                        try:
                            data = json.loads(line)
                        except json.JSONDecodeError:
                            # Skip lines that are not valid JSON.
                            continue
                        if "response" in data:
                            yield data["response"]
        except Exception as e:
            logger.error(f"流式聊天失败: {str(e)}")
            raise

    def _build_prompt(self, messages: List[Dict[str, str]]) -> str:
        """
        Flatten a chat message list into a single prompt string.

        A missing "role" defaults to "user"; messages with unrecognized roles
        are skipped. A trailing "Assistant:" cue is appended so the model
        continues in the assistant voice.

        Args:
            messages: Message list [{"role": ..., "content": ...}].

        Returns:
            Prompt with sections separated by blank lines.
        """
        labels = {"system": "System", "user": "User", "assistant": "Assistant"}
        prompt_parts = []
        for msg in messages:
            label = labels.get(msg.get("role", "user"))
            if label is not None:
                prompt_parts.append(f"{label}: {msg.get('content', '')}")

        prompt_parts.append("Assistant:")
        return "\n\n".join(prompt_parts)

    async def check_health(self) -> bool:
        """
        Check whether the Ollama service is reachable.

        Returns:
            True if GET /api/tags answers 200 within 5 seconds, else False.
        """
        try:
            async with httpx.AsyncClient(timeout=5.0) as client:
                response = await client.get(f"{self.base_url}/api/tags")
                return response.status_code == 200
        except Exception as e:
            logger.error(f"Ollama健康检查失败: {str(e)}")
            return False


# Module-level singleton client shared across the application.
ollama_client = OllamaClient()

