# -*- coding: utf-8 -*-
# =====================
# 
# 
# Author: wanghanmin1
# Date:   2025/7/8
# =====================
import asyncio
import json
import os
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from functools import partial
from typing import List, AsyncGenerator, Tuple

import aiohttp
import yaml

from genie_tool.util.log_util import logger
from genie_tool.model.document import Doc
from genie_tool.util.log_util import timer
from genie_tool.tool.search_component.query_process import query_decompose
from genie_tool.tool.search_component.answer import answer_question
from genie_tool.model.protocal import StreamMode
from genie_tool.util.file_util import truncate_files
from genie_tool.model.context import LLMModelInfoFactory


class KnowledgeSearch:
    """In-house knowledge-base search tool.

    Retrieves paragraph-level passages from one of several backends —
    Dify dataset retrieval, a Dify workflow, KBS, or a Feishu knowledge
    base — selected via the ``USE_KNOWNLEDGE_ENGINE`` environment variable.
    The streaming output protocol is fully aligned with DeepSearch:
    an extend / search / report three-stage stream.
    """

    def __init__(self):
        # Dify configuration, read from environment variables (local instance
        # or gateway). NOTE: several env var names below contain historical
        # typos ("KNOWNLEDGE", "TOEKN", "RUL"); they are kept verbatim for
        # backward compatibility with existing deployments.
        self._dify_base_url = os.getenv("KNOWNLEDGE_SEARCH_URL", "http://localhost")
        self._dify_dataset_id = os.getenv("KNOWNLEDGE_SEARCH_DATASET_ID", "")
        self._dify_token = os.getenv("KNOWNLEDGE_SEARCH_API_KEY", "")
        self._top_k = int(os.getenv("DIFY_RETRIEVE_TOP_K", 5))
        self._rerank = os.getenv("DIFY_RERANK_ENABLE", "true").lower() == "true"
        self._search_method = os.getenv("DIFY_SEARCH_METHOD", "hybrid_search")
        self._score_threshold_enabled = os.getenv("DIFY_SCORE_THRESHOLD_ENABLED", "false").lower() == "true"
        self._score_threshold = os.getenv("DIFY_SCORE_THRESHOLD", None)
        self._rerank_model = os.getenv("DIFY_RERANK_MODEL", "gte-rerank")

        # Dify workflow configuration.
        self._dify_workflow_token = os.getenv("KNOWNLEDGE_SEARCH_WORKFLOW_API_KEY", "")
        self._dify_workflow_base_url = os.getenv("KNOWNLEDGE_SEARCH_WORKFLOW_URL", "")

        # Feishu knowledge-base configuration.
        self._feishu_search_url = os.getenv("FEISHU_SEARCH_URL", "")
        self._feishu_get_token_url = os.getenv("FEISHU_GET_TOEKN_URL", "")

        # KBS configuration.
        self._kbs_token = os.getenv("KNOWNLEDGE_SEARCH_API_KEY_KBS", "")
        self._kbs_tenant_id = os.getenv("KBS_TENANT_ID", "")
        self._kbs_search_url = os.getenv("KBS_SEARCH_RUL", "")
        self._kbs_application_spacecodes = os.getenv("KBS_APPLICATION_SPACECODES", "")

        # Backend selector: "DIFY" (default) | "DIFY_WORKFLOW" | "KBS" | "FEISHU".
        self._use_knowledge_engine = os.getenv("USE_KNOWNLEDGE_ENGINE", "DIFY")

        self.searched_queries: List[str] = []  # sub-queries already retrieved (dedup)
        self.current_docs: List[Doc] = []      # accumulated retrieval results

    async def _dify_retrieve(self, query: str) -> List[Doc]:
        """Retrieve documents for *query* from the configured knowledge engine.

        Dispatches to a per-engine helper based on ``USE_KNOWNLEDGE_ENGINE``.

        Raises:
            ValueError: if the configured engine is not one of the supported
                values (the original code silently returned ``None`` here,
                which failed obscurely downstream).
        """
        engine = self._use_knowledge_engine
        if engine == "KBS":
            return await self._retrieve_kbs(query)
        if engine == "DIFY_WORKFLOW":
            return await self._retrieve_dify_workflow(query)
        if engine == "DIFY":
            return await self._retrieve_dify(query)
        if engine == "FEISHU":
            return await self._retrieve_feishu(query)
        raise ValueError(f"Unsupported knowledge engine: {engine!r}")

    @staticmethod
    def _docs_from_references(result: dict) -> List[Doc]:
        """Convert a KBS-style ``references`` payload into a list of Doc.

        Shared by the KBS and Dify-workflow backends, whose final JSON shape
        is the same: references -> paragraphs -> {content, dataSetName, ...}.
        """
        docs: List[Doc] = []
        for rec in result.get("references", []):
            for record in rec.get("paragraphs", []):
                dataset_doc_id = record.get("dataSetName", "")
                docs.append(Doc(
                    doc_type="web_page",
                    content=record.get("content", ""),
                    title=dataset_doc_id,
                    link="",  # knowledge-base chunks have no external link
                    data={
                        "dataset_doc_id": dataset_doc_id,
                        "index": record.get("index"),
                        "page": record.get("page"),
                    },
                ))
        return docs

    async def _retrieve_kbs(self, query: str) -> List[Doc]:
        """Call the KBS retrieval API and return a list of Doc.

        BUG FIX: the original code built the KBS payload but never issued the
        HTTP request (the request/parse section was nested inside the
        DIFY_WORKFLOW branch), so KBS retrieval always returned ``None``.
        It also asserted the Dify credentials instead of the KBS ones.
        """
        assert self._kbs_search_url and self._kbs_token, "KBS 配置缺失"
        headers = {
            "Authorization": f"Bearer {self._kbs_token}",
            "Content-Type": "application/json",
            "tenantId": self._kbs_tenant_id,
        }
        payload = {
            "question": query,
            "applicationSpaceCodes": [self._kbs_application_spacecodes],
            "turbo": True,
            "rspType": "JSON",
            "ragSearch": True,
            "modelName": "deepseek-r1",
            "fileIds": [],
            "spaceCodes": [],
        }
        async with aiohttp.ClientSession() as session:
            logger.info(f"调用dify/kbs payload:\n{payload}")
            async with session.post(self._kbs_search_url, json=payload, headers=headers, timeout=8000) as response:
                result = await response.json(content_type=None)
                logger.info(f"调用dify/kbs成功:\n{result}")
                return self._docs_from_references(result)

    async def _retrieve_dify_workflow(self, query: str) -> List[Doc]:
        """Call a Dify workflow that wraps KBS retrieval; return a list of Doc.

        The workflow response embeds the KBS JSON result as a *string* at
        ``data.outputs.result``, which must be json-decoded before parsing.
        """
        assert self._dify_workflow_base_url and self._dify_workflow_token, "dify工作流配置缺失"
        headers = {
            "Authorization": f"Bearer {self._dify_workflow_token}",
            "Content-Type": "application/json",
        }
        payload = {
            "inputs": {"query": query},
            "response_mode": "blocking",
            "user": "ryanwu",
        }
        async with aiohttp.ClientSession() as session:
            logger.info(f"调用dify/kbs payload:\n{payload}")
            async with session.post(self._dify_workflow_base_url, json=payload, headers=headers, timeout=8000) as response:
                logger.info(f"调用workflow响应状态码：{response.status}")
                raw = await response.json(content_type=None)
                logger.info(f"原始workflow响应：{raw}")
                result_data = raw.get("data", {})
                result_op = result_data.get("outputs", {})
                result = json.loads(result_op.get("result"))
                logger.info(f"调用dify的workflow执行kbs成功:\n{result}")
                return self._docs_from_references(result)

    async def _retrieve_dify(self, query: str) -> List[Doc]:
        """Call the Dify dataset retrieval API and return a list of Doc."""
        assert self._dify_dataset_id and self._dify_token, "知识库配置缺失"
        url = f"{self._dify_base_url}/v1/datasets/{self._dify_dataset_id}/retrieve"
        headers = {
            "Authorization": f"Bearer {self._dify_token}",
            "Content-Type": "application/json",
        }
        payload = {
            "query": query,
            "retrieval_model": {
                "search_method": self._search_method,
                "reranking_enable": self._rerank,
                "reranking_mode": None,
                "reranking_model": {
                    "reranking_provider_name": "",
                    "reranking_model_name": self._rerank_model,
                },
                "weights": None,
                "top_k": self._top_k,
                "score_threshold_enabled": self._score_threshold_enabled,
                "score_threshold": None if self._score_threshold is None else float(self._score_threshold),
            },
        }
        async with aiohttp.ClientSession() as session:
            async with session.post(url, json=payload, headers=headers, timeout=8000) as response:
                result = await response.json(content_type=None)
                docs: List[Doc] = []
                for rec in result.get("records", []):
                    seg = (rec or {}).get("segment", {})
                    doc_meta = (seg or {}).get("document", {})
                    docs.append(Doc(
                        doc_type="web_page",
                        content=seg.get("content", ""),
                        title=doc_meta.get("name", ""),
                        link="",  # knowledge-base chunks have no external link
                        data={
                            "dataset_doc_id": doc_meta.get("id", ""),
                            "segment_id": seg.get("id", ""),
                            "score": rec.get("score", 0.0),
                        },
                    ))
                return docs

    async def _retrieve_feishu(self, query: str) -> List[Doc]:
        """Call the Feishu knowledge-base search API and return a list of Doc.

        Reads the cached token bundle from ``feishu_token.yaml`` at the
        project root and refreshes it when expired (or when no expiry is
        recorded).
        """
        assert self._feishu_get_token_url and self._feishu_search_url, "飞书知识库配置缺失"
        # Project root = three directories above this file.
        project_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
        feishu_token_path = os.path.join(project_root, "feishu_token.yaml")

        pre_token = self.get_feishu_token(feishu_token_path)
        pre_expires_end = pre_token.get("expires_end")
        # Refresh when the expiry is unknown, or within 10 seconds of now.
        if pre_expires_end is None or time.time() >= pre_expires_end - 10:
            all_token = await self.get_all_token(pre_token.get("refresh_token"), feishu_token_path)
        else:
            all_token = pre_token
        access_token = all_token.get("access_token", "")

        headers = {
            "Authorization": f"Bearer {access_token}",
            "Content-Type": "application/json",
        }
        payload = {
            "global_variable": {
                "query": query,
            }
        }
        async with aiohttp.ClientSession() as session:
            async with session.post(self._feishu_search_url, json=payload, headers=headers, timeout=8000) as response:
                result = await response.json(content_type=None)
                logger.info(f"飞书知识库响应：{result}")
                # The useful payload is double-encoded JSON:
                # data.output -> {"input_message": "<json list>"}.
                data = result.get("data", {})
                outputs = json.loads(data.get("output"))
                input_message = json.loads(outputs.get("input_message"))
                docs: List[Doc] = []
                for rec in input_message:
                    source_value = (rec or {}).get("source_value", {})
                    content = self.process_content_feishu((source_value or {}).get("content", ""))
                    doc_info = (source_value or {}).get("doc", {})
                    logger.info(f"飞书知识库source_value响应：{source_value}")
                    docs.append(Doc(
                        doc_type="web_page",
                        content=content,
                        title=doc_info.get("title", ""),
                        link=doc_info.get("link", ""),
                        data={
                            "score": rec.get("recall_score", 0.0),
                        },
                    ))
                return docs

    def process_content_feishu(self, content: str) -> str:
        """Clean a Feishu knowledge-base chunk's raw content.

        Removes ``[block_sep]`` separators and, when a ``[content]`` marker is
        present, keeps only the text after the last occurrence.
        """
        content = content.replace("[block_sep]", "")
        if "[content]" in content:
            content = content.split("[content]")[-1].strip()
        return content.strip()

    async def get_all_token(self, refresh_token: str = None, feishu_token_path: str = None) -> dict:
        """Exchange *refresh_token* for a fresh Feishu access-token bundle.

        Persists the bundle to *feishu_token_path* (YAML) and returns it.
        The stored ``expires_end`` / ``refresh_token_expires_end`` fields are
        absolute Unix timestamps used for the expiry check.
        """
        headers = {
            "Content-Type": "application/json",
        }
        # SECURITY NOTE: the client credentials were previously hard-coded.
        # They can now be overridden via env vars; the original literals are
        # kept only as backward-compatible fallbacks — prefer the env vars
        # and rotate the exposed secret.
        payload = {
            "grant_type": "refresh_token",
            "client_id": os.getenv("FEISHU_CLIENT_ID", "cli_a81b5876cf71901c"),
            "client_secret": os.getenv("FEISHU_CLIENT_SECRET", "N1mF3HTxTKHyg6Rt4tD14ca7T34nfKqE"),
            "refresh_token": refresh_token,
        }
        async with aiohttp.ClientSession() as session:
            async with session.post(self._feishu_get_token_url, headers=headers, json=payload, timeout=8000) as response:
                result = await response.json(content_type=None)
                logger.info(f"飞书知识库token响应：{result}")
                expires_in = result.get("expires_in")
                refresh_token_expires_in = result.get("refresh_token_expires_in")
                token_dict = {
                    "access_token": result.get("access_token", ""),
                    "expires_in": expires_in,
                    "expires_end": time.time() + expires_in,
                    "refresh_token": result.get("refresh_token", ""),
                    "refresh_token_expires_in": refresh_token_expires_in,
                }
                # BUG FIX: the original logged refresh_token_expires_end
                # unconditionally, raising NameError whenever the API omitted
                # refresh_token_expires_in.
                if refresh_token_expires_in:
                    token_dict["refresh_token_expires_end"] = time.time() + refresh_token_expires_in
                logger.info(f"飞书知识库access_token响应：{token_dict}")
                self.update_feishu_token(token_dict, feishu_token_path)
                return token_dict

    def get_feishu_token(self, feishu_token_path: str) -> dict:
        """Load the cached Feishu token bundle from ``feishu_token.yaml``."""
        with open(feishu_token_path, "r", encoding="utf-8") as f:
            token_dict = yaml.safe_load(f)
            logger.info(f"飞书知识库token读取成功！{token_dict}")
        return token_dict

    def update_feishu_token(self, token_dict: dict, feishu_token_path: str):
        """Persist the Feishu token bundle back to ``feishu_token.yaml``."""
        with open(feishu_token_path, "w", encoding="utf-8") as f:
            yaml.dump(token_dict, f, allow_unicode=True)
            logger.info(f"飞书知识库refresh_token更新成功")

    def search_docs_str(self, model: str = None) -> str:
        """Render the accumulated docs as an HTML fragment for answer prompting.

        When *model* is given, the docs are truncated to ~80% of that model's
        context window before rendering.
        """
        max_tokens = LLMModelInfoFactory.get_context_length(model)
        truncate_docs = truncate_files(self.current_docs, max_tokens=int(max_tokens * 0.8)) if model else self.current_docs
        return "".join(
            f"文档编号〔{i}〕. \n{doc.to_html()}\n"
            for i, doc in enumerate(truncate_docs, start=1)
        )

    @timer()
    async def run(
            self,
            query: str,
            request_id: str = None,
            max_loop: int = 1,
            stream: bool = False,
            stream_mode: StreamMode = None,
            *args,
            **kwargs
    ) -> AsyncGenerator[str, None]:
        """Knowledge-base search with a streaming reply; protocol matches DeepSearch.

        Yields JSON strings in three stages: "extend" (decomposed sub-queries),
        "search" (retrieved docs per sub-query), and "report" (the answer,
        chunked when *stream* is enabled, then a final message).
        """
        # BUG FIX: the original signature used the mutable default
        # ``stream_mode: StreamMode = StreamMode()`` — a single instance shared
        # across every call. Build a fresh one per call instead.
        if stream_mode is None:
            stream_mode = StreamMode()

        current_loop = 1
        while current_loop <= max_loop:
            logger.info(f"{request_id} 第 {current_loop} 轮知识库搜索...")
            # Decompose the user query into sub-queries.
            sub_queries = await query_decompose(query=query)

            # Stage 1: announce the expanded queries ("extend"), docs not yet known.
            yield json.dumps({
                "requestId": request_id,
                "query": query,
                "searchResult": {"query": sub_queries, "docs": [[]] * len(sub_queries)},
                "isFinal": False,
                "messageType": "extend"
            }, ensure_ascii=False)

            await asyncio.sleep(0.1)

            # Drop sub-queries already searched in previous rounds.
            sub_queries = [sub_query for sub_query in sub_queries if sub_query not in self.searched_queries]

            # Retrieve all sub-queries concurrently (the original awaited them
            # one by one despite the "parallel retrieval" intent); gather
            # preserves input order.
            docs_list: List[List[Doc]] = list(await asyncio.gather(
                *(self._dify_retrieve(query=sub) for sub in sub_queries)
            ))

            # Stage 2: push retrieval results ("search").
            truncate_len = int(os.getenv("SINGLE_PAGE_MAX_SIZE", 200))
            yield json.dumps(
                {
                    "requestId": request_id,
                    "query": query,
                    "searchResult": {
                        "query": sub_queries,
                        "docs": [[d.to_dict(truncate_len=truncate_len) for d in docs_l] for docs_l in docs_list]
                    },
                    "isFinal": False,
                    "messageType": "search"
                }, ensure_ascii=False)

            # Update accumulated context for answer generation / dedup.
            self.current_docs.extend([d for group in docs_list for d in group])
            self.searched_queries.extend(sub_queries)

            # Single retrieval round for now; extend to multi-round as needed.
            break

        # Stage 3: aggregate the answer ("report"), optionally streamed in
        # chunks of stream_mode.token tokens.
        answer = ""
        acc_content = ""
        acc_token = 0
        async for chunk in answer_question(
                query=query, search_content=self.search_docs_str(os.getenv("SEARCH_ANSWER_MODEL"))
        ):
            if stream:
                if acc_token >= stream_mode.token:
                    yield json.dumps({
                        "requestId": request_id,
                        "query": query,
                        "searchResult": {"query": [], "docs": []},
                        "answer": acc_content,
                        "isFinal": False,
                        "messageType": "report"
                    }, ensure_ascii=False)
                    acc_content = ""
                    acc_token = 0
                acc_content += chunk
                acc_token += 1
            answer += chunk
        # Flush any buffered tail before the final message.
        if stream and acc_content:
            yield json.dumps({
                "requestId": request_id,
                "query": query,
                "searchResult": {"query": [], "docs": []},
                "answer": acc_content,
                "isFinal": False,
                "messageType": "report"
            }, ensure_ascii=False)
        # Final message: full answer when not streaming (already sent otherwise).
        yield json.dumps({
                "requestId": request_id,
                "query": query,
                "searchResult": {"query": [], "docs": []},
                "answer": "" if stream else answer,
                "isFinal": True,
                "messageType": "report"
            }, ensure_ascii=False)
