"""
Ollama 客户端

用于与本地 Ollama 服务进行通信
"""

import json
import asyncio
from typing import List, Dict, Any, Optional
import aiohttp


class OllamaClient:
    """Ollama API 客户端"""
    
    def __init__(self, base_url: str = "http://localhost:11434", model: str = "qwen2.5:8b"):
        self.base_url = base_url.rstrip('/')
        self.model = model
        self.session = None
    
    async def _get_session(self):
        """获取 HTTP 会话"""
        if self.session is None:
            self.session = aiohttp.ClientSession()
        return self.session
    
    async def close(self):
        """关闭客户端"""
        if self.session:
            await self.session.close()
            self.session = None
    
    async def chat_completion(self, messages: List[Dict[str, str]], 
                             temperature: float = 0.7, 
                             max_tokens: int = 2000) -> str:
        """
        聊天完成 API
        
        Args:
            messages: 消息列表，格式为 [{"role": "user", "content": "..."}]
            temperature: 温度参数
            max_tokens: 最大令牌数
            
        Returns:
            模型响应文本
        """
        
        # 将 OpenAI 格式的消息转换为 Ollama 格式
        prompt = self._convert_messages_to_prompt(messages)
        
        url = f"{self.base_url}/api/generate"
        payload = {
            "model": self.model,
            "prompt": prompt,
            "stream": False,
            "options": {
                "temperature": temperature,
                "num_predict": max_tokens
            }
        }
        
        try:
            session = await self._get_session()
            async with session.post(url, json=payload) as response:
                if response.status == 200:
                    result = await response.json()
                    return result.get("response", "")
                else:
                    error_text = await response.text()
                    raise Exception(f"Ollama API 错误 ({response.status}): {error_text}")
                    
        except aiohttp.ClientConnectorError:
            raise Exception("无法连接到 Ollama 服务。请确保 Ollama 正在运行 (ollama serve)")
        except Exception as e:
            raise Exception(f"调用 Ollama API 失败: {str(e)}")
    
    def _convert_messages_to_prompt(self, messages: List[Dict[str, str]]) -> str:
        """
        将 OpenAI 格式的消息转换为适合 Ollama 的提示
        """
        prompt_parts = []
        
        for message in messages:
            role = message.get("role", "user")
            content = message.get("content", "")
            
            if role == "system":
                prompt_parts.append(f"系统: {content}")
            elif role == "user":
                prompt_parts.append(f"用户: {content}")
            elif role == "assistant":
                prompt_parts.append(f"助手: {content}")
        
        # 添加最后的助手前缀
        prompt_parts.append("助手:")
        
        return "\n\n".join(prompt_parts)
    
    async def list_models(self) -> List[str]:
        """列出可用模型"""
        url = f"{self.base_url}/api/tags"
        
        try:
            session = await self._get_session()
            async with session.get(url) as response:
                if response.status == 200:
                    result = await response.json()
                    models = [model.get("name", "") for model in result.get("models", [])]
                    return models
                else:
                    return []
        except:
            return []
    
    async def check_connection(self) -> bool:
        """检查 Ollama 连接"""
        try:
            models = await self.list_models()
            return len(models) > 0
        except:
            return False


# 创建 OpenAI 兼容的接口类
class OllamaOpenAICompatible:
    """
    Ollama client that mimics the OpenAI client interface, letting
    existing OpenAI-based code switch to Ollama without modification.
    """

    class ChatCompletions:
        """Adapter exposing an OpenAI-style ``create`` call."""

        def __init__(self, ollama_client: OllamaClient):
            self.ollama_client = ollama_client

        async def create(self, model: str, messages: List[Dict[str, str]],
                        temperature: float = 0.7, max_tokens: int = 2000):
            """Run a chat completion and wrap the text in an OpenAI-shaped response."""
            # ``model`` is accepted only for signature compatibility; the
            # wrapped client was already configured with its model name.
            text = await self.ollama_client.chat_completion(
                messages=messages,
                temperature=temperature,
                max_tokens=max_tokens,
            )
            return MockOpenAIResponse(text)

    def __init__(self, base_url: str = "http://localhost:11434", model: str = "qwen2.5:8b"):
        self.ollama_client = OllamaClient(base_url, model)
        self.chat = self.ChatCompletions(self.ollama_client)

    async def close(self):
        """Shut down the wrapped Ollama client."""
        await self.ollama_client.close()


class MockOpenAIResponse:
    """Minimal stand-in for an OpenAI chat-completion response object."""

    def __init__(self, content: str):
        # Mirror the OpenAI access path: response.choices[0].message.content
        self.choices = [MockChoice(content)]


class MockChoice:
    """Minimal stand-in for an OpenAI response choice."""

    def __init__(self, content: str):
        # Each choice carries one message, as in the OpenAI schema.
        self.message = MockMessage(content)


class MockMessage:
    """Minimal stand-in for an OpenAI chat message."""

    def __init__(self, content: str):
        # Raw text produced by the model.
        self.content = content