import requests
from typing import Dict, Any, Optional, List, Union, Iterator
import json
import time
from config import OLLAMA_CONFIG
from llm.model_interface import ModelInterface, BaseEmbeddingMixin
from utils.model_logger import ModelLogger

class OllamaClient(ModelInterface, BaseEmbeddingMixin):
    """Client for a local Ollama server: text generation, chat, embeddings,
    and model discovery.

    Sampling parameters are sent inside the ``options`` object of the request
    payload, as required by the Ollama REST API — top-level values such as
    ``temperature`` are silently ignored by the server.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None, api_key: Optional[str] = None):
        """Initialize the client.

        Args:
            config: Configuration dict; falls back to ``OLLAMA_CONFIG``.
                Must contain 'base_url', 'default_model' and
                'embedding_model'; may contain 'timeout' (seconds, default
                60) and tuning keys such as 'num_ctx' or 'seed'.
            api_key: Unused; kept for signature parity with other model
                clients (a local Ollama server requires no authentication).
        """
        ModelInterface.__init__(self)
        BaseEmbeddingMixin.__init__(self)
        self.config = config or OLLAMA_CONFIG
        self.base_url = self.config['base_url']
        self.default_model = self.config['default_model']
        self.embedding_model = self.config['embedding_model']
        self.timeout = self.config.get('timeout', 60)
        self.logger = ModelLogger()

    def _prepare_payload(self,
                        model: Optional[str] = None,
                        max_tokens: Optional[int] = None,
                        temperature: Optional[float] = None,
                        top_p: Optional[float] = None,
                        stream: bool = False,
                        **kwargs) -> Dict[str, Any]:
        """Build the request payload shared by /api/generate and /api/chat.

        Sampling parameters are nested under ``options`` because the Ollama
        API ignores them at the top level. ``is not None`` checks are used so
        that legitimate zero values (e.g. ``temperature=0`` for greedy
        decoding) are preserved.

        Args:
            model: Model name; defaults to ``self.default_model``.
            max_tokens: Maps to Ollama's ``num_predict`` option.
            temperature: Sampling temperature.
            top_p: Nucleus-sampling probability mass.
            stream: Whether the endpoint should stream its response.
            **kwargs: Extra top-level request fields (e.g. ``format``,
                ``keep_alive``); a ``kwargs['options']`` dict, if given, is
                merged over the derived options.

        Returns:
            Dict[str, Any]: Payload without the prompt/messages field, which
            the caller adds.
        """
        payload: Dict[str, Any] = {
            "model": model or self.default_model,
            "stream": stream,
        }

        options: Dict[str, Any] = {}
        if max_tokens is not None:
            options["num_predict"] = max_tokens
        if temperature is not None:
            options["temperature"] = temperature
        if top_p is not None:
            options["top_p"] = top_p

        # Optional tuning knobs picked up from the client configuration.
        for key in ('num_ctx', 'num_thread', 'num_gpu', 'num_batch',
                    'repeat_penalty', 'seed', 'stop'):
            if key in self.config:
                options[key] = self.config[key]

        # Caller-supplied options win over config-derived values.
        options.update(kwargs.pop("options", {}))
        if options:
            payload["options"] = options

        # Remaining kwargs are genuine top-level request fields.
        payload.update(kwargs)
        return payload

    def _call_api(self, url: str, payload: Dict[str, Any], call_type: str, extract) -> str:
        """POST a non-streaming request, log the call, and return the text.

        Args:
            url: Full endpoint URL.
            payload: JSON body to send.
            call_type: Label recorded by the model logger.
            extract: Callable mapping the decoded JSON response to the
                generated text.

        Raises:
            Exception: Wraps any transport/HTTP/parse error after logging it;
                the original exception is chained for debugging.
        """
        start_time = time.time()
        try:
            response = requests.post(url, json=payload, timeout=self.timeout)
            response.raise_for_status()
            response_text = extract(response.json())
            self.logger.log_model_call(
                call_type=call_type,
                request=payload,
                response=response_text,
                duration=time.time() - start_time,
            )
            return response_text
        except Exception as e:
            self.logger.log_model_call(
                call_type=call_type,
                request=payload,
                error=str(e),
                duration=time.time() - start_time,
            )
            # Chain the cause so the original traceback is not lost.
            raise Exception(f"Ollama API 调用失败: {str(e)}") from e

    def generate(self,
                prompt: str,
                model: Optional[str] = None,
                max_tokens: Optional[int] = None,
                temperature: Optional[float] = None,
                top_p: Optional[float] = None,
                stream: bool = False,
                **kwargs) -> Union[str, Iterator[str]]:
        """Generate a completion for ``prompt`` via /api/generate.

        Returns:
            The full response text, or an iterator of text chunks when
            ``stream=True``.

        Raises:
            Exception: If the API call fails (non-streaming path logs the
                error first).
        """
        url = f"{self.base_url}/api/generate"
        payload = self._prepare_payload(
            model=model,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            stream=stream,
            **kwargs
        )
        payload["prompt"] = prompt

        if stream:
            return self._stream_generate(url, payload)
        return self._call_api(url, payload, 'generate', lambda r: r['response'])

    def _stream_generate(self, url: str, payload: Dict[str, Any]) -> Iterator[str]:
        """Yield incremental text chunks from a streaming /api/generate call.

        Malformed or partial JSON lines are skipped rather than aborting the
        stream; any transport error is wrapped and re-raised.
        """
        try:
            response = requests.post(url, json=payload, stream=True, timeout=self.timeout)
            response.raise_for_status()

            for line in response.iter_lines():
                if not line:
                    continue

                try:
                    data = json.loads(line.decode())
                except json.JSONDecodeError:
                    continue

                if "response" in data:
                    yield data["response"]

        except Exception as e:
            raise Exception(f"Ollama 流式生成失败: {str(e)}") from e

    def chat(self,
            messages: List[Dict[str, str]],
            model: Optional[str] = None,
            max_tokens: Optional[int] = None,
            temperature: Optional[float] = None,
            top_p: Optional[float] = None,
            stream: bool = False,
            **kwargs) -> Union[str, Iterator[str]]:
        """Run a chat conversation via /api/chat.

        Args:
            messages: List of ``{"role": ..., "content": ...}`` dicts.

        Returns:
            The assistant's reply text, or an iterator of text chunks when
            ``stream=True``.

        Raises:
            Exception: If the API call fails (non-streaming path logs the
                error first).
        """
        url = f"{self.base_url}/api/chat"
        payload = self._prepare_payload(
            model=model,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            stream=stream,
            **kwargs
        )
        payload["messages"] = messages

        if stream:
            return self._stream_chat(url, payload)
        return self._call_api(url, payload, 'chat', lambda r: r['message']['content'])

    def _stream_chat(self, url: str, payload: Dict[str, Any]) -> Iterator[str]:
        """Yield incremental message chunks from a streaming /api/chat call.

        Malformed or partial JSON lines are skipped rather than aborting the
        stream; any transport error is wrapped and re-raised.
        """
        try:
            response = requests.post(url, json=payload, stream=True, timeout=self.timeout)
            response.raise_for_status()

            for line in response.iter_lines():
                if not line:
                    continue

                try:
                    data = json.loads(line.decode())
                except json.JSONDecodeError:
                    continue

                if "message" in data and "content" in data["message"]:
                    yield data["message"]["content"]

        except Exception as e:
            raise Exception(f"Ollama 流式对话失败: {str(e)}") from e

    def _get_remote_embeddings(self,
                      text: Union[str, List[str]],
                      model: Optional[str] = None) -> List[List[float]]:
        """Fetch embedding vectors from /api/embed (batch-capable).

        Args:
            text: A single string or a list of strings.
            model: Model name; defaults to ``self.embedding_model``.

        Returns:
            List[List[float]]: One embedding vector per input text.

        Raises:
            Exception: If the API call fails; the original exception is
                chained.
        """
        model = model or self.embedding_model
        url = f"{self.base_url}/api/embed"

        # Normalize to a list; /api/embed accepts a batch of inputs.
        if isinstance(text, str):
            text = [text]

        payload = {
            "model": model,
            "input": text
        }

        try:
            response = requests.post(url, json=payload, timeout=self.timeout)
            response.raise_for_status()
            return response.json()['embeddings']
        except Exception as e:
            raise Exception(f"Ollama API 调用失败: {str(e)}") from e

    def _fetch_remote_models(self) -> List[str]:
        """Return the names of models available on the server.

        Ollama model names already embed the version tag (e.g.
        ``qwen2.5:14b``). Best-effort: returns an empty list on any error
        instead of raising.
        """
        try:
            url = f"{self.base_url}/api/tags"
            response = requests.get(url, timeout=self.timeout)
            response.raise_for_status()
            models = response.json().get('models', [])
            return [model['name'] for model in models]
        except Exception as e:
            print(f"Error fetching Ollama models: {str(e)}")
            return []