import os
from typing import List, Dict, Any, Optional
import numpy as np
from sentence_transformers import SentenceTransformer
import faiss
import json
import requests
import yaml

class QASystem:
    """Hybrid question-answering system.

    Tries retrieval-augmented generation first (vector-store search + an
    Ollama-hosted model constrained to the retrieved context) and falls
    back to asking the model directly when retrieval yields no usable
    answer.
    """

    def __init__(self, vector_store, config_path="config/config.yaml"):
        """
        Initialize the hybrid QA system.

        Args:
            vector_store: VectorStore instance providing ``search(query, top_k)``.
            config_path: Path to the YAML configuration file.
        """
        # Load configuration; explicit UTF-8 avoids platform-dependent
        # decoding of non-ASCII config values.
        with open(config_path, 'r', encoding='utf-8') as f:
            self.config = yaml.safe_load(f)

        # Use the injected vector store (no ownership taken here).
        self.vector_store = vector_store

        # Ollama API endpoints derived from config.
        self.ollama_model_name = self.config['ollama']['model_name']
        self.ollama_api_base = self.config['ollama']['base_url']
        self.ollama_api_url = f"{self.ollama_api_base}/api/generate"
        self.ollama_chat_url = f"{self.ollama_api_base}/api/chat"

        # NOTE(review): loaded but never applied anywhere in this class —
        # retrieval results are not filtered by score. Confirm whether
        # low-similarity hits should be dropped before answering.
        self.similarity_threshold = self.config['retrieval']['similarity_threshold']

    def _query_vector_db(self, query: str, top_k: int = 3) -> List[Dict[str, Any]]:
        """Return the ``top_k`` most relevant documents for ``query``."""
        return self.vector_store.search(query, top_k)

    @staticmethod
    def _extract_content(doc: Any) -> str:
        """Best-effort extraction of text from a retrieved document.

        Supports LangChain-style ``.page_content``, a plain ``.content``
        attribute, raw strings, and falls back to ``str(doc)``.
        """
        if hasattr(doc, 'page_content'):
            return doc.page_content
        if hasattr(doc, 'content'):
            return doc.content
        if isinstance(doc, str):
            return doc
        return str(doc)

    def _build_references(self, results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Build the citation list returned alongside vector-db answers."""
        return [
            {
                "content": self._extract_content(result['document']),
                "similarity": result["score"],
            }
            for result in results
        ]

    def _generate_from_ollama(self, prompt: str, temperature: float = 0.7, max_tokens: int = 1024) -> str:
        """Generate a completion via the Ollama ``/api/generate`` endpoint.

        Args:
            prompt: Fully formatted prompt string.
            temperature: Sampling temperature passed to the model.
            max_tokens: Generation cap passed through in ``options``.

        Returns:
            The model's text, or a human-readable error message on failure
            (this method never raises).
        """
        try:
            # Configurable timeout so a stuck model cannot hang the caller.
            timeout = self.config.get('ollama', {}).get('timeout', 120)

            response = requests.post(
                self.ollama_api_url,
                json={
                    "model": self.ollama_model_name,
                    "prompt": prompt,
                    "stream": False,
                    "options": {
                        "temperature": temperature,
                        "top_p": 0.9,
                        "top_k": 40,
                        "repeat_penalty": 1.1,
                        "max_tokens": max_tokens
                    }
                },
                timeout=timeout
            )

            if response.status_code == 200:
                response_data = response.json()
                # Ollama versions differ in response shape; try both.
                if "response" in response_data:
                    return response_data["response"].strip()
                elif "message" in response_data:
                    return response_data["message"].get("content", "").strip()
                else:
                    # Unknown format: surface the raw payload.
                    return str(response_data).strip()
            else:
                error_msg = f"Ollama API调用失败: {response.status_code} - {response.text}"
                print(error_msg)
                return f"模型生成遇到问题: {error_msg}"
        except requests.exceptions.Timeout:
            error_msg = "Ollama API请求超时，请稍后重试"
            print(error_msg)
            return error_msg
        except Exception as e:
            error_msg = f"Ollama生成错误: {str(e)}"
            print(error_msg)
            return f"模型生成遇到问题: {str(e)}"

    def direct_answer(self, query: str) -> str:
        """Answer directly with the Ollama-hosted model (no retrieval)."""
        prompt = f"<|im_start|>user\n{query}<|im_end|>\n<|im_start|>assistant\n"
        return self._generate_from_ollama(prompt)

    def _get_response_from_vector_db(self, query: str, results: List[Dict[str, Any]]) -> Optional[str]:
        """Generate an answer grounded in the retrieved documents.

        Returns ``None`` when there are no results or the model reports
        the context is insufficient, signalling the caller to fall back.
        """
        if not results:
            return None

        # Assemble the retrieved snippets into a numbered context block.
        contents = [
            f"相关信息 {i+1}：{self._extract_content(result['document'])}"
            for i, result in enumerate(results)
        ]
        context = "\n".join(contents)

        prompt = f"""<|im_start|>user
基于以下信息回答问题，如果信息不足以回答问题，请直接回复"我没有足够的信息回答这个问题"。

问题：{query}

{context}
<|im_end|>

<|im_start|>assistant
"""

        # Low temperature: grounded answers should stay close to the context.
        answer = self._generate_from_ollama(prompt, temperature=0.3)

        # Sentinel phrase means retrieval was insufficient — trigger fallback.
        if "我没有足够的信息回答这个问题" in answer:
            return None

        return answer

    def answer(self, query: str) -> Dict[str, Any]:
        """
        Answer a user question.

        Args:
            query: The user's question.

        Returns:
            Dict with keys ``answer``, ``source`` ("vector_db" or
            "ollama"), and ``references`` (list of cited snippets).
        """
        # 1. Query the vector store first.
        vector_results = self._query_vector_db(query)

        # 2. Try a retrieval-grounded answer.
        if vector_results:
            vector_response = self._get_response_from_vector_db(query, vector_results)
            if vector_response:
                return {
                    "answer": vector_response,
                    "source": "vector_db",
                    "references": self._build_references(vector_results)
                }

        # 3. Retrieval failed — answer directly with the model.
        llm_response = self.direct_answer(query)
        return {
            "answer": llm_response,
            "source": "ollama",
            "references": []
        }

    def chat(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
        """
        Chat interface with conversation history.

        Args:
            messages: Chat history, formatted as
                ``[{"role": "user"/"assistant", "content": "..."}, ...]``.

        Returns:
            Dict with keys ``answer``, ``source``, and ``references``.
        """
        try:
            # Find the most recent user message; retrieval is keyed on it.
            latest_user_message = next(
                (msg["content"] for msg in reversed(messages) if msg["role"] == "user"),
                None
            )

            if not latest_user_message:
                return {
                    "answer": "无法找到用户消息",
                    "source": "system",
                    "references": []
                }

            # 1. Query the vector store first.
            vector_results = self._query_vector_db(latest_user_message)

            # 2. Try a retrieval-grounded answer.
            if vector_results:
                vector_response = self._get_response_from_vector_db(latest_user_message, vector_results)
                if vector_response:
                    return {
                        "answer": vector_response,
                        "source": "vector_db",
                        "references": self._build_references(vector_results)
                    }

            # 3. Retrieval failed — fall back to the chat endpoint with
            # the full history (copied so we never mutate caller data).
            chat_history = [
                {"role": msg["role"], "content": msg["content"]}
                for msg in messages
            ]

            try:
                # Same configurable timeout as /api/generate; previously
                # this request had none and could hang forever.
                timeout = self.config.get('ollama', {}).get('timeout', 120)
                response = requests.post(
                    self.ollama_chat_url,
                    json={
                        "model": self.ollama_model_name,
                        "messages": chat_history,
                        "stream": False,
                        "options": {
                            "temperature": 0.7,
                            "top_p": 0.9,
                            "top_k": 40,
                            "repeat_penalty": 1.1
                        }
                    },
                    timeout=timeout
                )

                if response.status_code == 200:
                    response_data = response.json()
                    if "message" in response_data:
                        return {
                            "answer": response_data["message"]["content"].strip(),
                            "source": "ollama",
                            "references": []
                        }

                return {
                    "answer": "模型生成遇到问题，请稍后重试",
                    "source": "system",
                    "references": []
                }

            except Exception as e:
                return {
                    "answer": f"聊天请求失败: {str(e)}",
                    "source": "system",
                    "references": []
                }

        except Exception as e:
            return {
                "answer": f"处理聊天消息时出错: {str(e)}",
                "source": "system",
                "references": []
            }