"""
AI服务模块
负责与Ollama模型进行对话
"""
import requests
import json
from typing import List, Dict, Any, Optional
from ..core.config import config


class AIService:
    """Client for a local Ollama server: text generation and model management.

    Reads the server URL and default model from the ``ai.providers`` section
    of the application config, falling back to standard Ollama defaults.
    All network failures degrade to error strings / empty results rather than
    raising, so callers never need a try/except around these methods.
    """

    def __init__(self):
        # Resolved once at construction; update_default_model() can override
        # the model afterwards without touching the config.
        self.base_url = self._get_ollama_url()
        self.default_model = self._get_default_model()

    def _provider_setting(self, key: str, default: str) -> str:
        """Return *key* from the 'ollama' provider config entry, or *default*.

        Shared lookup for the URL/model getters below (they previously
        duplicated this loop).
        """
        for provider in config.get('ai.providers', []):
            if provider.get('name') == 'ollama':
                return provider.get(key, default)
        return default

    def _get_ollama_url(self) -> str:
        """Return the Ollama server base URL from config."""
        return self._provider_setting('base_url', 'http://localhost:11434')

    def _get_default_model(self) -> str:
        """Return the default chat model name from config."""
        return self._provider_setting('default_model', 'deepseek-r1:1.5b')

    def get_current_model(self) -> str:
        """Return the model currently in use.

        Fix: this previously re-read the config, so a model switched via
        update_default_model() was silently ignored here.
        """
        return self.default_model

    def update_default_model(self, model_name: str):
        """Switch the default model used for subsequent requests."""
        self.default_model = model_name

    def generate_response(self, prompt: str, model: Optional[str] = None,
                         context: Optional[List[str]] = None) -> str:
        """Generate an AI answer for *prompt*, optionally grounded in *context*.

        Args:
            prompt: The user question.
            model: Model name override; defaults to ``self.default_model``.
            context: Optional document snippets woven into the prompt.

        Returns:
            The model's answer text, or a human-readable error message on any
            failure — this method never raises.
        """
        if not model:
            model = self.default_model

        try:
            full_prompt = self._build_prompt(prompt, context)

            # Call the Ollama generate endpoint (non-streaming).
            response = requests.post(
                f"{self.base_url}/api/generate",
                json={
                    "model": model,
                    "prompt": full_prompt,
                    "stream": False,
                    "options": {
                        "temperature": 0.7,
                        "top_p": 0.9,
                        # Fix: Ollama's option is "num_predict"; the previous
                        # "max_tokens" key was silently ignored by the server.
                        # Kept small to speed up responses.
                        "num_predict": 1024
                    }
                },
                timeout=30  # short timeout so callers are not blocked for long
            )

            if response.status_code == 200:
                data = response.json()
                return data.get('response', '抱歉，我无法生成回答。')
            return f"AI服务错误: {response.status_code}"

        except Exception as e:
            # Deliberately broad: any failure (network, JSON decode, ...)
            # degrades to an error string instead of crashing the caller.
            print(f"AI服务调用失败: {e}")
            return f"AI服务暂时不可用: {str(e)}"

    def _build_prompt(self, question: str, context: Optional[List[str]] = None) -> str:
        """Build the final prompt; with *context*, wrap it in a RAG template.

        Without context the question is sent verbatim.
        """
        if not context:
            return question

        # Join the retrieved snippets and embed them in the QA template.
        context_text = "\n\n".join(context)

        prompt = f"""基于以下文档内容回答问题：

文档内容：
{context_text}

问题：{question}

请基于文档内容提供准确、详细的回答。如果文档中没有相关信息，请说明无法从文档中找到答案。

回答："""

        return prompt

    def generate_qa_response(self, question: str, relevant_chunks: List[Dict[str, Any]]) -> str:
        """Answer *question* grounded in the retrieved document chunks.

        Each chunk dict must carry 'file_name' and 'content' keys. Returns a
        fixed apology string when no chunks were retrieved.
        """
        if not relevant_chunks:
            return "抱歉，我在文档中没有找到相关信息来回答您的问题。"

        # Format each chunk with its source file so the model can cite it.
        context_chunks = []
        for chunk in relevant_chunks:
            context_chunks.append(f"文档：{chunk['file_name']}\n内容：{chunk['content']}")

        return self.generate_response(question, context=context_chunks)

    def test_connection(self) -> bool:
        """Return True if the Ollama server answers the /api/tags probe."""
        try:
            response = requests.get(f"{self.base_url}/api/tags", timeout=5)
            return response.status_code == 200
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; requests.RequestException covers all network errors.
        except requests.RequestException:
            return False

    def get_available_models(self) -> List[Dict[str, Any]]:
        """Return the list of locally available models ([] on any failure)."""
        try:
            response = requests.get(f"{self.base_url}/api/tags", timeout=10)
            if response.status_code == 200:
                data = response.json()
                return data.get('models', [])
        except Exception as e:
            print(f"获取模型列表失败: {e}")

        return []

    def get_model_info(self, model_name: str) -> Optional[Dict[str, Any]]:
        """Return detail metadata for *model_name*, or None on failure."""
        try:
            response = requests.post(
                f"{self.base_url}/api/show",
                json={"name": model_name},
                timeout=10
            )
            if response.status_code == 200:
                return response.json()
        except Exception as e:
            print(f"获取模型信息失败: {e}")

        return None


# Module-level singleton shared by the application.
# NOTE: instantiated at import time, which reads the config immediately.
ai_service = AIService()