import requests
import json
from typing import List
from config import OLLAMA_CONFIG
import logging

class OllamaService:
    """Thin wrapper around the Ollama HTTP API (model listing, embeddings, chat)."""

    # Seconds to wait for any single HTTP request. Without an explicit timeout,
    # `requests` waits forever, so an unreachable Ollama server would hang callers.
    REQUEST_TIMEOUT = 60

    def __init__(self):
        """Read connection settings from OLLAMA_CONFIG and set up a module logger."""
        self.base_url = OLLAMA_CONFIG['base_url']
        self.default_model = OLLAMA_CONFIG['default_model']
        self.logger = logging.getLogger(__name__)

    def list_models(self) -> List[str]:
        """Return the names of all models available on the Ollama server.

        Returns:
            List[str]: model names; an empty list if the request fails.
        """
        try:
            response = requests.get(
                f"{self.base_url}/api/tags",
                timeout=self.REQUEST_TIMEOUT,
            )
            if response.status_code == 200:
                # Response shape: {"models": [{"name": ...}, ...]}
                models = response.json()
                return [model['name'] for model in models.get('models', [])]

            self.logger.error(f"Failed to get models: {response.text}")
            return []

        except Exception as e:
            # Best-effort API: callers treat an empty list as "no models available".
            self.logger.error(f"Error listing models: {str(e)}")
            return []

    def get_embeddings(self, text: str, model_name: str) -> List[float]:
        """Compute the embedding vector for a piece of text.

        Args:
            text: Input text to embed.
            model_name: Name of the embedding model to use.

        Returns:
            List[float]: The embedding vector.

        Raises:
            Exception: If the HTTP call fails or the server returns a non-200 status.
        """
        try:
            response = requests.post(
                f"{self.base_url}/api/embeddings",
                json={
                    "model": model_name,
                    "prompt": text
                },
                timeout=self.REQUEST_TIMEOUT,
            )

            if response.status_code == 200:
                return response.json()['embedding']
            else:
                raise Exception(f"Failed to get embeddings: {response.text}")

        except Exception as e:
            # Log here, then let the caller decide how to handle the failure.
            self.logger.error(f"Error getting embeddings: {str(e)}")
            raise

    def chat(self, system_prompt: str, user_message: str, model_name: str) -> str:
        """Run a single non-streaming chat exchange with an Ollama model.

        Args:
            system_prompt: System prompt establishing the assistant's behavior.
            user_message: The user's message.
            model_name: Name of the chat model to use.

        Returns:
            str: The model's reply content, or "" on any failure (this method
            never raises; errors are logged instead).
        """
        try:
            url = f"{self.base_url}/api/chat"

            # Standard two-message conversation: system context + user turn.
            messages = [
                {
                    "role": "system",
                    "content": system_prompt
                },
                {
                    "role": "user",
                    "content": user_message
                }
            ]

            self.logger.debug(f"Chat request: {messages}")

            response = requests.post(
                url,
                json={
                    "model": model_name,
                    "messages": messages,
                    "stream": False
                },
                timeout=self.REQUEST_TIMEOUT,
            )

            self.logger.debug(f"Chat response: {response.text}")

            if response.status_code != 200:
                self.logger.error(f"Error response from Ollama: {response.text}")
                return ""

            try:
                response_json = response.json()

                # Non-streaming responses carry the reply under "message"/"content".
                if 'message' in response_json:
                    return response_json['message']['content']
                else:
                    self.logger.error(f"Unexpected response structure: {response_json}")
                    return ""

            except json.JSONDecodeError as e:
                self.logger.error(f"JSON decode error: {e}")
                self.logger.error(f"Response text: {response.text}")
                return ""

        except Exception as e:
            self.logger.error(f"Chat error: {str(e)}")
            return ""

    def submit_prompt(self, prompt: List[dict], model: str, **kwargs) -> str:
        """Send a pre-built message list to the chat endpoint.

        Unlike `chat`, this method propagates failures to the caller
        instead of returning "".

        Args:
            prompt: Full message list, e.g. [{"role": ..., "content": ...}, ...].
            model: Name of the chat model to use.
            **kwargs: Accepted for interface compatibility; currently unused.

        Returns:
            str: The model's reply content.

        Raises:
            Exception: If the server returns a non-200 status.
        """
        self.logger.debug(f"Chat request: {prompt}")

        response = requests.post(
            f"{self.base_url}/api/chat",
            json={
                "model": model,
                "messages": prompt,
                "stream": False
            },
            timeout=self.REQUEST_TIMEOUT,
        )

        self.logger.debug(f"Chat response: {response.text}")

        if response.status_code != 200:
            raise Exception(f"API call failed: {response.text}")

        return response.json()['message']['content']