import re
import time
import hashlib
from decimal import Decimal
from typing import List, Dict, Any
from tortoise import Tortoise
from common.enums.apply import DocsEnum
from common.utils.jieba import JiebaUtil
from common.utils.urls import UrlUtil
from common.models.dev import DevModelsModel
from common.models.agents import AgentKnowArchiveModel
from common.postgres.public import PgKnowledgeModel
from common.postgres.public import PgDocumentsModel
from common.postgres.public import PgAttachmentModel
from common.cache.pool_cache import KeyPoolCache
from common.chain.chain_server import ChatUtils, FlowsSchema
from common.chain.vector_server import VectorService
from common.chain.ranker_server import RerankerServer
from exception import AppException


class RecallKnowLogic:
    """Knowledge-base recall: semantic (vector), full-text, hybrid (RRF) and attachment retrieval."""

    @classmethod
    async def embedding_recall(cls, model: str, query: str, knows: List[int], limit: int = 100):
        """
        Semantic recall against the pgvector knowledge table.

        Args:
            model: embedding model identifier.
            query: user query text to embed.
            knows: knowledge-base ids to search within.
            limit: maximum number of rows to return.

        Returns:
            list[dict]: rows tagged type='emb' with `emb_score` (1 - cosine
            distance, rounded to 5 places, higher = more similar) plus file info.

        Raises:
            AppException: when the embedding service fails.
        """
        # Guard: an empty id list would render invalid SQL ("IN ()").
        if not knows:
            return []

        try:
            embedding_arr = await VectorService().to_embed(model, query)
            embedding_str = "[" + ",".join(str(item) for item in embedding_arr) + "]"
        except Exception as e:
            # Upstream errors typically arrive as (code, message); fall back to
            # str(e) so a short args tuple cannot raise IndexError here.
            raise AppException(e.args[1] if len(e.args) > 1 else str(e))

        know_ids: str = ",".join(str(x) for x in knows)
        # `<=>` is pgvector cosine distance: smaller is closer, hence ASC order.
        SQL = f"""__SELECT 
                'emb' AS type,
                uuid,user_id,know_id,archive_id,question,answer,(embedding <=> '{embedding_str}') AS emb_score
            FROM {PgKnowledgeModel.Meta.table}
            WHERE know_id IN ({know_ids}) 
                AND status=3
                AND is_delete=0  
            ORDER BY emb_score ASC
            LIMIT {limit}
            """.replace("__", "").replace("            ", "")

        lists = await Tortoise.get_connection("pgsql").execute_query_dict(SQL)
        archives = await AgentKnowArchiveModel.queryArchiveDictsById([item["archive_id"] for item in lists])
        for item in lists:
            archive = archives.get(item["archive_id"], {})
            # Convert distance to similarity so higher means "more relevant".
            item["emb_score"] = round((1 - item["emb_score"]), 5)
            item["uuid"] = str(item["uuid"])
            item["file"] = {
                "code": archive.get("code", ""),
                "name": archive.get("name", ""),
                "size": archive.get("size", 0),
                "path": await UrlUtil.to_absolute_url(archive.get("path", ""))
            }
        return lists

    @classmethod
    async def full_text_recall(cls, query: str, knows: List[int], limit: int = 100):
        """
        Full-text recall via PostgreSQL tsquery ranking (zh_en configuration).

        Args:
            query: user query text; split and preprocessed with jieba.
            knows: knowledge-base ids to search within.
            limit: maximum number of rows to return.

        Returns:
            list[dict]: rows tagged type='full' with a `full_score` rank plus file info.
        """
        # Guard: an empty id list would render invalid SQL ("IN ()").
        if not knows:
            return []

        keyword: str = JiebaUtil.jieba_split(query)
        queries: str = JiebaUtil.preprocess_query(keyword)
        # NOTE(review): `queries` derives from user input and is interpolated
        # into a SQL string literal; escape single quotes so the text cannot
        # break out of the literal. Confirm preprocess_query does not already escape.
        queries = queries.replace("'", "''")
        know_ids: str = ",".join(str(x) for x in knows)

        SQL = f"""__SELECT 
                'full' AS type,
                uuid,user_id,know_id,archive_id,question,answer,
                ts_rank_cd(phrases, to_tsquery('zh_en', '{queries}')) AS full_score
            FROM {PgKnowledgeModel.Meta.table}
            WHERE know_id IN ({know_ids}) 
                AND is_delete=0  
                AND phrases @@ to_tsquery('zh_en', '{queries}')
            ORDER BY full_score DESC
            LIMIT {limit}
        """.replace("__", "").replace("            ", "")

        lists = await Tortoise.get_connection("pgsql").execute_query_dict(SQL)
        archives = await AgentKnowArchiveModel.queryArchiveDictsById([item["archive_id"] for item in lists])
        for item in lists:
            archive = archives.get(item["archive_id"], {})
            item["full_score"] = round(item["full_score"], 5)
            item["uuid"] = str(item["uuid"]).strip()
            item["file"] = {
                "code": archive.get("code", ""),
                "name": archive.get("name", ""),
                "size": archive.get("size", 0),
                "path": await UrlUtil.to_absolute_url(archive.get("path", ""))
            }
        return lists

    @classmethod
    async def mixed_recall(cls, model: str, query: str, knows: List[int]):
        """
        Hybrid recall: semantic + full-text results fused with RRF.

        Returns:
            list[dict]: fused results ordered by descending RRF score.
        """
        # Semantic recall
        embeddingResults = await cls.embedding_recall(model=model, query=query, knows=knows, limit=80)
        # Full-text recall
        fullTextResults = await cls.full_text_recall(query=query, knows=knows, limit=60)
        # Reciprocal Rank Fusion
        rrfConcatResults = RecallUtil.rrf_concat_results([
            {"k": 60, "list": embeddingResults},
            {"k": 60, "list": fullTextResults}
        ])
        return rrfConcatResults

    @classmethod
    async def annex_recall(cls, model: str, query: str, ids: List[str], similar: Decimal):
        """
        Attachment recall: vector search over dataset attachments, returned
        grouped by media type in the order images, video, audio, files.

        Args:
            model: embedding model identifier.
            query: user query text to embed.
            ids: dataset ids to search within.
            similar: minimum similarity threshold.

        Returns:
            list[dict]: attachments with similarity >= `similar`.

        Raises:
            AppException: when the embedding service fails.
        """
        # Guard: an empty id list would render invalid SQL ("IN ()").
        if not ids:
            return []

        try:
            embedding_arr = await VectorService().to_embed(model, query)
            embedding_str = "[" + ",".join(str(item) for item in embedding_arr) + "]"
        except Exception as e:
            # See embedding_recall: avoid IndexError on short args tuples.
            raise AppException(e.args[1] if len(e.args) > 1 else str(e))

        dataset_ids: str = ",".join(f"'{str(x)}'" for x in ids)
        SQL = f"""__SELECT uuid,type,name,path,ext,size,question,
                (embedding <=> '{embedding_str}') AS emb_score
            FROM {PgAttachmentModel.Meta.table}
            WHERE dataset_id IN ({dataset_ids}) 
                AND status=3
                AND scene='know'
            ORDER BY emb_score ASC
            LIMIT 200
            """.replace("__", "").replace("            ", "")

        audio = []
        video = []
        files = []
        images = []
        scene = {"image": images, "audio": audio, "video": video, "file": files}
        lists = await Tortoise.get_connection("pgsql").execute_query_dict(SQL)

        for item in lists:
            score = round((1 - item["emb_score"]), 5)
            if score < similar:
                continue
            array = scene.get(item["type"])
            if array is None:
                # Unknown media type: skip instead of raising KeyError.
                continue
            array.append({
                "id": str(item["uuid"]),
                "score": score,
                "file_type": str(item["type"]),
                "file_name": str(item["name"]),
                "file_size": int(item["size"]),
                "file_ext": str(item["ext"]),
                "file_path": await UrlUtil.to_absolute_url(str(item["path"]))
            })
        return images + video + audio + files


class RecallDocsLogic:
    """Per-document recall: semantic, full-text and hybrid (RRF) retrieval over document chunks."""

    @classmethod
    async def embedding_recall(cls, model: str, query: str, file_id: int, limit: int = 100):
        """
        Semantic recall against the pgvector documents table for one file.

        Args:
            model: embedding model identifier.
            query: user query text to embed.
            file_id: document file id to search within.
            limit: maximum number of rows to return.

        Returns:
            list[dict]: chunk rows tagged type='emb' with `emb_score`
            (1 - cosine distance, rounded to 5 places).

        Raises:
            AppException: when the embedding service fails.
        """
        try:
            embedding_arr = await VectorService().to_embed(model, query)
            embedding_str = "[" + ",".join(str(item) for item in embedding_arr) + "]"
        except Exception as e:
            # Upstream errors typically arrive as (code, message); fall back to
            # str(e) so a short args tuple cannot raise IndexError here.
            raise AppException(e.args[1] if len(e.args) > 1 else str(e))

        # `<=>` is pgvector cosine distance: smaller is closer, hence ASC order.
        SQL = f"""__SELECT 
                'emb' AS type,
                uuid,page_no,page_nv,chunk_index,chunk_texts,(embedding <=> '{embedding_str}') AS emb_score
            FROM {PgDocumentsModel.Meta.table}
            WHERE file_id={file_id}
                AND status={DocsEnum.FILE_YES}
                AND is_delete=0  
            ORDER BY emb_score ASC
            LIMIT {limit}
            """.replace("__", "").replace("            ", "")

        lists = await Tortoise.get_connection("pgsql").execute_query_dict(SQL)
        for item in lists:
            # Convert distance to similarity so higher means "more relevant".
            item["emb_score"] = round((1 - item["emb_score"]), 5)
            item["uuid"] = str(item["uuid"])
        return lists

    @classmethod
    async def full_text_recall(cls, query: str, file_id: int, limit: int = 100):
        """
        Full-text recall over document chunks via PostgreSQL tsquery ranking.

        Args:
            query: user query text; split and preprocessed with jieba.
            file_id: document file id to search within.
            limit: maximum number of rows to return.

        Returns:
            list[dict]: chunk rows tagged type='full' with a `full_score` rank.
        """
        keyword: str = JiebaUtil.jieba_split(query)
        queries: str = JiebaUtil.preprocess_query(keyword)
        # NOTE(review): `queries` derives from user input and is interpolated
        # into a SQL string literal; escape single quotes so the text cannot
        # break out of the literal. Confirm preprocess_query does not already escape.
        queries = queries.replace("'", "''")

        # FIX: this query previously ran against PgKnowledgeModel with
        # `know_id={file_id}` (copy-paste from RecallKnowLogic) while selecting
        # document-chunk columns; it must target the documents table by file_id,
        # matching embedding_recall above.
        SQL = f"""__SELECT 
                'full' AS type,
                uuid,page_no,page_nv,chunk_index,chunk_texts,
                ts_rank_cd(phrases, to_tsquery('zh_en', '{queries}')) AS full_score
            FROM {PgDocumentsModel.Meta.table}
            WHERE file_id={file_id} 
                AND is_delete=0  
                AND phrases @@ to_tsquery('zh_en', '{queries}')
            ORDER BY full_score DESC
            LIMIT {limit}
        """.replace("__", "").replace("            ", "")

        lists = await Tortoise.get_connection("pgsql").execute_query_dict(SQL)
        for item in lists:
            item["full_score"] = round(item["full_score"], 5)
            item["uuid"] = str(item["uuid"]).strip()

        return lists

    @classmethod
    async def mixed_recall(cls, model: str, query: str, file_id: int):
        """
        Hybrid recall: semantic + full-text results fused with RRF.

        Returns:
            list[dict]: fused results ordered by descending RRF score.
        """
        # Semantic recall
        embeddingResults = await cls.embedding_recall(model=model, query=query, file_id=file_id, limit=80)
        # Full-text recall
        fullTextResults = await cls.full_text_recall(query=query, file_id=file_id, limit=60)
        # Reciprocal Rank Fusion
        rrfConcatResults = RecallUtil.rrf_concat_results([
            {"k": 60, "list": embeddingResults},
            {"k": 60, "list": fullTextResults}
        ])
        return rrfConcatResults


class RecallUtil:
    """Shared post-processing helpers for recall results: fusion, reranking and filtering."""

    @classmethod
    def rrf_concat_results(cls, recall_results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """
        Merge multiple recall result lists with Reciprocal Rank Fusion (RRF).

        Args:
            recall_results (List[Dict[str, Any]]):
                [
                    {"k": 60, "list": embeddings},
                    {"k": 60, "list": fullTexts},
                    {"k": 58, "list": reRankResults}
                ]

        Returns:
            List[Dict[str, Any]]: items merged by uuid, ordered by descending
            fused score; the temporary `rrf_score` field is stripped.
        """
        # Drop empty result sets; short-circuit trivial cases.
        recall_results = [item for item in recall_results if len(item['list']) > 0]
        if len(recall_results) == 0:
            return []
        if len(recall_results) == 1:
            return recall_results[0]['list']

        # Accumulate 1/(k + rank) per uuid across all result lists.
        item_map = {}
        for result in recall_results:
            k = result["k"]
            for index, data in enumerate(result["list"]):
                _uuid = str(data["uuid"])
                rank = index + 1
                rrf_score = 1 / (k + rank)
                if _uuid in item_map:
                    # Seen before: merge fields and add to the fused score.
                    item_map[_uuid].update(data)
                    item_map[_uuid]["rrf_score"] += rrf_score
                else:
                    # First occurrence: seed the entry with its RRF score.
                    item_map[_uuid] = {
                        **data,
                        "rrf_score": rrf_score
                    }

        # Sort fused items by score, highest first.
        results = sorted(item_map.values(), key=lambda x: x["rrf_score"], reverse=True)

        # Strip the temporary rrf_score field before returning.
        final_results = []
        for item in results:
            final_item = {key: value for key, value in item.items() if key != "rrf_score"}
            final_results.append(final_item)

        return final_results

    @classmethod
    async def re_ranker_results(cls, results: List[Dict[str, Any]], question: str, models: str):
        """
        Re-rank recall results with the configured reranker model.

        Args:
            results: recall rows (knowledge Q/A rows or document chunks).
            question: the user question to rank against.
            models: reranker model identifier.

        Returns:
            dict: {"status": bool, "usage": FlowsSchema | None, "results": list}
            with a `rerank_score` attached to each returned row.

        Raises:
            AppException: when the reranker model has no API key configured.
        """
        start_time = time.time()
        if not results:
            return {"status": False, "usage": None, "results": results}

        # De-duplicate rows whose text matches once punctuation, underscores
        # and whitespace are stripped (compared via SHA-256 of the cleaned text).
        seen_hashes = set()
        filtered_results = []
        for item in results:
            if item.get("chunk_texts") is not None:
                combined_text = item.get("chunk_texts")
            else:
                combined_text = f"{item['question']}{item.get('answer', '')}"
            cleaned_text = re.sub(r"[\W_]+", "", combined_text, flags=re.UNICODE)
            text_hash = hashlib.sha256(cleaned_text.encode("utf-8")).hexdigest()
            if text_hash not in seen_hashes:
                seen_hashes.add(text_hash)
                filtered_results.append(item)

        # Cap the reranker payload at 64 documents of at most 4000 chars each.
        # (FIX: the previous `len(documents) <= 64` check admitted a 65th document.)
        documents = []
        for item in filtered_results[:64]:
            if item.get("chunk_texts") is not None:
                text = item.get("chunk_texts")
            else:
                text = f"{item['question']}\n{item.get('answer', '')}"
            documents.append(text[:4000])

        apikey = {}
        _models = await DevModelsModel.check_models(models, scene="ranking")
        if _models["config"].get("check_key"):
            apikey = await KeyPoolCache(scene="ranking", channel=_models["mark"]).get_key() or {}
            if not apikey:
                raise AppException("重排模型尚未配置密钥,请与管理员联系")

        reServer = RerankerServer(_models["config"], apikey)
        reResults = await reServer.send(question, documents)

        # Billing: rerank usage is charged entirely as prompt tokens.
        reUsage = reResults.get("usage") or {}
        total_tokens: int = reUsage.get("total_tokens", 0)
        consume_points: int = ChatUtils.compute_price(total_tokens, _models["price"])
        usages = FlowsSchema(
            name="结果重排",
            scene="rerank",
            alias=_models["alias"],
            model=_models["model"],
            task_time=f"{(time.time() - start_time):.2f}",
            use_points=consume_points,
            total_tokens=total_tokens,
            prompt_tokens=total_tokens,
            completion_tokens=0
        )

        # Map reranker indices back onto the deduplicated rows; `documents`
        # is a prefix of `filtered_results` in the same order, so indices align.
        merge_result = []
        for item in reResults["results"]:
            data = filtered_results[int(item["index"])]
            data["rerank_score"] = round(item["relevance_score"], 5)
            merge_result.append(data)
        return {"status": True, "usage": usages, "results": merge_result}

    @classmethod
    def filter_max_score(cls, results: List[Dict[str, Any]], mode: str, rerank: bool, similar: Decimal):
        """
        Similarity filtering.

        Keeps rows whose score meets `similar`: uses `rerank_score` when rerank
        was applied, `emb_score` in "similar" mode, otherwise passes through.
        """
        if rerank:
            return [item for item in results if item.get("rerank_score", 0) >= similar]

        if mode == "similar":
            return [item for item in results if item.get("emb_score", 0) >= similar]

        return results

    @classmethod
    def filter_max_tokens(cls, results: List[Dict[str, Any]], max_tokens: int):
        """
        Token-budget filtering.

        Accumulates rows until the token budget is exceeded; the row that
        crosses the budget is still included (budget is a soft cap).
        """
        total_tokens: int = 0
        filter_lists: list = []
        for item in results:
            if item.get("chunk_texts") is not None:
                text: str = item.get("chunk_texts")
            else:
                text: str = f"{item['question']}{item['answer']}"
            total_tokens += ChatUtils.compute_tokens(text)
            filter_lists.append(item)
            if total_tokens > max_tokens:
                break
        return filter_lists
