import json
from typing import List, Generator, Optional, Any
import requests
from requests.exceptions import RequestException

from .base import BaseProvider

class OllamaProvider(BaseProvider):
    """Provider for the Ollama HTTP API: chat, streaming chat, and embeddings."""

    def __init__(self, base_url: str = "http://localhost:11434", api_key: Optional[str] = None, timeout: int = 60):
        """Initialize the Ollama provider.

        Args:
            base_url: Base URL of the Ollama service; defaults to the local server.
            api_key: API key (Ollama does not currently require one; kept for
                interface compatibility with other providers).
            timeout: Request timeout in seconds.
        """
        super().__init__(base_url, api_key, timeout)

    def chat(self, model: str, prompt: str, **kwargs) -> str:
        """Run a single, non-streaming chat completion.

        Args:
            model: Model name, e.g. 'llama2'.
            prompt: The user prompt.
            **kwargs: Extra request parameters such as temperature, top_p.

        Returns:
            str: The model's reply text, or "" if the response has no content.

        Raises:
            Exception: If the API call fails.
        """
        url = f"{self.base_url}/api/chat"
        payload = {
            "model": model,
            "messages": [{"role": "user", "content": prompt}],
            "stream": False,
            **kwargs,
        }

        try:
            resp = requests.post(url, json=payload, timeout=self.timeout)
            resp.raise_for_status()
            # Non-streaming /api/chat responds with
            # {"message": {"role": ..., "content": ...}, ...}
            return resp.json().get("message", {}).get("content", "")
        except RequestException as e:
            # Chain the original error so callers can inspect the root cause.
            raise Exception(f"Ollama API调用失败: {str(e)}") from e

    def embed(self, model: str, text: str | List[str], **kwargs) -> List[List[float]]:
        """Compute embeddings for one text or a list of texts.

        Args:
            model: Model name.
            text: A single text or a list of texts to embed.
            **kwargs: Extra request parameters.

        Returns:
            List[List[float]]: One embedding vector per input text; a single
            string input yields a list of length 1.

        Raises:
            Exception: If the API call fails or no embeddings are returned.
        """
        url = f"{self.base_url}/api/embed"
        # The /api/embed endpoint expects "input" to be a list; normalize a
        # bare string into a one-element list.
        input_texts = [text] if isinstance(text, str) else text
        payload = {
            "model": model,
            "input": input_texts,
            **kwargs,
        }

        try:
            resp = requests.post(url, json=payload, timeout=self.timeout)
            resp.raise_for_status()
            embeddings = resp.json().get("embeddings")
            if not embeddings:
                raise Exception("未能获取到向量嵌入结果")
            return embeddings
        except RequestException as e:
            raise Exception(f"Ollama API调用失败: {str(e)}") from e

    def chat_stream(self, model: str, prompt: str, **kwargs) -> Generator[str, None, None]:
        """Stream a chat completion, yielding reply fragments as they arrive.

        Args:
            model: Model name, e.g. 'llama2'.
            prompt: The user prompt.
            **kwargs: Extra request parameters such as temperature, top_p.

        Yields:
            str: Successive fragments of the model's reply.

        Raises:
            Exception: If the API call fails.
        """
        url = f"{self.base_url}/api/chat"
        payload = {
            "model": model,
            "messages": [{"role": "user", "content": prompt}],
            "stream": True,
            **kwargs,
        }

        try:
            # stream=True so the response body is consumed line-by-line; the
            # context manager guarantees the connection is released.
            with requests.post(url, json=payload, stream=True, timeout=self.timeout) as resp:
                resp.raise_for_status()
                # Each non-empty line is a standalone JSON object (NDJSON).
                for line in resp.iter_lines():
                    if line:
                        chunk = json.loads(line)
                        if "message" in chunk and "content" in chunk["message"]:
                            yield chunk["message"]["content"]
        except RequestException as e:
            raise Exception(f"Ollama API流式调用失败: {str(e)}") from e