"""
知识库服务
提供知识库检索和管理服务，支持高级检索功能
"""

import logging
import hashlib
import aiohttp
from typing import Dict, Any, List, Optional
from datetime import datetime

from config.settings import KNOWLEDGE_BASE_CONFIG, settings

logger = logging.getLogger(__name__)


async def _vector_search(
    query: str,
    knowledge_base_name: str,
    top_k: int,
    score_threshold: float
) -> List[Dict[str, Any]]:
    """
    Vector search — query the external knowledge base API.

    Falls back to the simulated local search (``_fallback_search``) when
    the API responds with a non-200 status or the request raises.

    Args:
        query: Query text.
        knowledge_base_name: Name of the knowledge base to search.
        top_k: Maximum number of results to return.
        score_threshold: Minimum similarity score.

    Returns:
        List of result dicts with keys: id, content, score, source,
        metadata, type.
    """
    try:
        logger.info(f"调用外部知识库API搜索: {query}, 知识库: {knowledge_base_name}")

        # External knowledge-base API endpoint comes from settings.
        api_url = settings.AI_KNOWLEDGE_URL

        request_data = {
            "query": query,
            "knowledge_base_name": knowledge_base_name,
            "top_k": top_k,
            "score_threshold": score_threshold,
            "file_name": "",
            "metadata": {}
        }

        headers = {
            'accept': 'application/json',
            'Content-Type': 'application/json'
        }

        # 30s overall timeout so a hung API cannot stall the caller forever.
        async with aiohttp.ClientSession() as session:
            async with session.post(
                api_url,
                json=request_data,
                headers=headers,
                timeout=aiohttp.ClientTimeout(total=30)
            ) as response:
                if response.status == 200:
                    api_results = await response.json()

                    # Map the external API schema onto the internal result format.
                    converted_results = [
                        {
                            "id": item.get("id", ""),
                            "content": item.get("page_content", ""),
                            "score": item.get("score", 0.0),
                            "source": knowledge_base_name,
                            "metadata": item.get("metadata", {}),
                            "type": item.get("type", "Document")
                        }
                        for item in api_results
                    ]

                    logger.info(f"外部API搜索成功，返回 {len(converted_results)} 个结果")
                    return converted_results

                error_text = await response.text()
                logger.error(f"外部知识库API调用失败: {response.status} - {error_text}")

                # Degrade gracefully: serve simulated results instead of failing.
                logger.warning("降级到模拟搜索")
                return await _fallback_search(query, knowledge_base_name, top_k, score_threshold)

    except Exception as e:
        logger.error(f"调用外部知识库API时出错: {str(e)}")

        # Degrade gracefully on network/parse errors as well.
        logger.warning("降级到模拟搜索")
        return await _fallback_search(query, knowledge_base_name, top_k, score_threshold)



async def _fallback_search(
    query: str,
    knowledge_base_name: str,
    top_k: int,
    score_threshold: float
) -> List[Dict[str, Any]]:
    """
    Fallback search — simulated results used when the external API is
    unavailable.

    Args:
        query: Query text.
        knowledge_base_name: Name of the knowledge base to search.
        top_k: Maximum number of results to return.
        score_threshold: Minimum similarity score.

    Returns:
        Simulated result dicts filtered by ``score_threshold`` and capped
        at ``top_k``.
    """
    logger.info(f"使用降级搜索: {query}")

    # (index, score, file name, content detail, page, section, tags)
    templates = [
        (1, 0.95, "document1.txt", "包含详细的信息和说明", 1, "introduction", ["基础", "概念"]),
        (2, 0.88, "document2.txt", "提供具体的示例和用法", 5, "examples", ["示例", "用法"]),
        (3, 0.82, "document3.txt", "包含常见问题和解决方案", 10, "faq", ["问题", "解决"]),
    ]

    candidates: List[Dict[str, Any]] = []
    for idx, score, filename, detail, page, section, tags in templates:
        candidates.append({
            "id": f"doc_{hashlib.md5(f'{query}_{idx}'.encode()).hexdigest()}",
            "content": f"这是关于'{query}'的相关内容{idx}，{detail}。",
            "score": score,
            "source": f"{knowledge_base_name}/{filename}",
            "metadata": {
                "page": page,
                "section": section,
                "tags": tags,
                "created_at": "2024-01-01T10:00:00"
            },
            "type": "Document"
        })

    # Drop hits below the threshold, then cap the count.
    return [hit for hit in candidates if hit["score"] >= score_threshold][:top_k]


async def _expand_query(query: str) -> List[str]:
    """
    Query expansion.

    Expands the query via simple synonym substitution plus a few phrasing
    variants. Duplicates are removed while preserving insertion order, so
    the original query is always first and the output is deterministic.

    Args:
        query: Original query.

    Returns:
        Up to 5 unique queries, the original query first.
    """
    # TODO: richer expansion (synonym dictionaries, related terms, LLM rewriting).
    logger.info(f"扩展查询: {query}")

    # Simple synonym-based expansion table.
    synonyms = {
        "什么是": ["定义", "概念", "含义", "解释"],
        "如何": ["怎么", "方法", "步骤", "流程"],
        "为什么": ["原因", "理由", "原理", "机制"],
        "功能": ["作用", "用途", "特性", "能力"],
        "问题": ["错误", "故障", "异常", "bug"],
        "解决": ["修复", "处理", "排除"],  # fixed: "修复" was listed twice
        "配置": ["设置", "参数", "选项", "环境"],
        "安装": ["部署", "搭建", "配置", "初始化"],
        "使用": ["操作", "应用", "调用", "运行"],
        "开发": ["编程", "编码", "实现", "构建"]
    }

    expanded_queries = [query]

    # Substitute each matched term with its synonyms.
    for original, synonym_list in synonyms.items():
        if original in query:
            for synonym in synonym_list:
                expanded_query = query.replace(original, synonym)
                if expanded_query != query:
                    expanded_queries.append(expanded_query)

    # Add phrasing variants of the original query.
    if "什么是" in query:
        expanded_queries.append(query.replace("什么是", "请解释"))
        expanded_queries.append(query.replace("什么是", "请说明"))

    if "如何" in query:
        expanded_queries.append(query.replace("如何", "怎么"))
        expanded_queries.append(query.replace("如何", "方法"))

    # Order-preserving dedup: list(set(...)) ordering depends on string hash
    # randomization, which made the [:5] truncation non-deterministic and
    # could even drop the original query.
    unique_queries = list(dict.fromkeys(expanded_queries))

    logger.info(f"查询扩展完成，原始查询: {query}, 扩展查询: {unique_queries}")
    return unique_queries[:5]  # cap at 5 queries


class KnowledgeService:
    """Knowledge base service.

    Provides retrieval (query expansion, multi-pass search, deduplication
    and optional reranking) plus stubbed document management and
    statistics helpers.
    """

    def __init__(self):
        """Initialize the service with the shared knowledge base config."""
        self.config = KNOWLEDGE_BASE_CONFIG
        # Placeholders for future vector-DB / embedding-model integration.
        self.vector_db = None
        self.embedding_model = None

    async def search_knowledge_base(
        self,
        query: str,
        knowledge_base_name: str,
        top_k: int = 10,
        score_threshold: float = 0.7,
        use_rerank: bool = True
    ) -> List[Dict[str, Any]]:
        """Search a knowledge base.

        Pipeline: expand the query, retrieve for every variant,
        deduplicate, optionally rerank, then truncate to ``top_k``.

        Args:
            query: Query text.
            knowledge_base_name: Knowledge base to search.
            top_k: Maximum number of results to return.
            score_threshold: Minimum similarity score.
            use_rerank: Whether to rerank the merged results.

        Returns:
            Ranked result dicts; empty list on any failure.
        """
        try:
            logger.info(f"开始搜索知识库: {knowledge_base_name}, 查询: {query}")

            # Step 1: build query variants to improve recall.
            variants = await _expand_query(query)

            # Step 2: retrieve for every variant, over-fetching so the
            # later dedup still leaves enough candidates.
            gathered: List[Dict[str, Any]] = []
            for variant in variants:
                hits = await _vector_search(
                    query=variant,
                    knowledge_base_name=knowledge_base_name,
                    top_k=top_k * 2,
                    score_threshold=score_threshold
                )
                gathered.extend(hits)

            # Step 3: merge duplicates across variants.
            merged = self.deduplicate_results(gathered)

            # Step 4: optional rerank against the original query.
            if use_rerank and merged:
                merged = await self._rerank_results(query, merged)

            # Step 5: cap the result count.
            final_results = merged[:top_k]

            logger.info(f"知识库搜索完成，找到 {len(final_results)} 个结果")
            return final_results

        except Exception as e:
            logger.error(f"知识库搜索失败: {str(e)}")
            return []

    def deduplicate_results(self, results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Remove near-duplicate results and sort by score.

        Two results count as duplicates when their content summaries
        (see ``_generate_content_summary``) are equal; the first
        occurrence wins.

        Args:
            results: Raw result dicts.

        Returns:
            Unique results ordered by descending score.
        """
        if not results:
            return results

        seen_summaries = set()
        unique_results: List[Dict[str, Any]] = []

        for hit in results:
            digest = self._generate_content_summary(hit.get("content", ""))
            if digest in seen_summaries:
                continue
            seen_summaries.add(digest)
            unique_results.append(hit)

        unique_results.sort(key=lambda hit: hit.get("score", 0), reverse=True)

        logger.info(f"去重完成，原始结果: {len(results)}, 去重后: {len(unique_results)}")
        return unique_results

    def _generate_content_summary(self, content: str, max_length: int = 100) -> str:
        """Build a short fingerprint of ``content`` for deduplication.

        Takes the first ``max_length`` characters; when the content was
        truncated, prefers cutting at the last full-width period if it
        falls past 70% of the window.

        Args:
            content: Full text.
            max_length: Maximum summary length.

        Returns:
            The summary string.
        """
        summary = content[:max_length].strip()

        if len(content) <= max_length:
            return summary

        cut = summary.rfind('。')
        if cut > max_length * 0.7:  # only cut when the sentence end is late enough
            summary = summary[:cut + 1]
        return summary

    async def _rerank_results(
        self,
        query: str,
        results: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """Rerank results by blending original score with term overlap.

        Mutates each result's ``score`` in place, then sorts the list in
        place by the new score.

        Args:
            query: Original query text.
            results: Result dicts to rerank.

        Returns:
            The same list, reordered.
        """
        # TODO: replace with a proper reranker (BM25, cross-encoder, ...).
        logger.info(f"重排序结果，查询: {query}")

        terms = query.lower().split()
        for hit in results:
            text = hit.get("content", "").lower()
            # Count how many query terms appear in the content.
            overlap = sum(1 for term in terms if term in text)
            hit["score"] = (hit.get("score", 0) + overlap * 0.1) / 2

        results.sort(key=lambda hit: hit.get("score", 0), reverse=True)
        return results

    async def add_document(
        self,
        knowledge_base_name: str,
        content: str,
        metadata: Dict[str, Any]
    ) -> str:
        """Add a document to a knowledge base (storage is stubbed).

        Args:
            knowledge_base_name: Target knowledge base.
            content: Document text.
            metadata: Document metadata (currently unused by the stub).

        Returns:
            Deterministic document ID derived from the knowledge base
            name and the first 100 characters of the content.
        """
        # TODO: chunk the text, embed it and persist to the vector store.
        logger.info(f"添加文档到知识库: {knowledge_base_name}")

        fingerprint = f"{knowledge_base_name}_{content[:100]}".encode()
        doc_id = hashlib.md5(fingerprint).hexdigest()

        logger.info(f"文档添加成功，ID: {doc_id}")
        return doc_id

    async def delete_document(
        self,
        knowledge_base_name: str,
        doc_id: str
    ) -> bool:
        """Delete a document from a knowledge base (stubbed).

        Args:
            knowledge_base_name: Knowledge base holding the document.
            doc_id: Document ID.

        Returns:
            Always True (simulated deletion).
        """
        # TODO: remove the document from the vector store.
        logger.info(f"删除文档: {doc_id}")
        return True

    async def get_knowledge_base_stats(self, knowledge_base_name: str) -> Dict[str, Any]:
        """Return statistics for a knowledge base (currently hard-coded).

        Args:
            knowledge_base_name: Knowledge base name.

        Returns:
            Dict with document count, size, last update time, vector
            dimension and index type.
        """
        # TODO: fetch real statistics from the backing store.
        return {
            "name": knowledge_base_name,
            "document_count": 1000,
            "total_size": "50MB",
            "last_updated": datetime.now().isoformat(),
            "vector_dimension": 1536,
            "index_type": "HNSW"
        }