import asyncio
import os
from collections import defaultdict
from functools import wraps
from typing import Dict, List, Optional, Tuple
from urllib.parse import urlparse

import elasticsearch
from aiocache import Cache, cached
from aiocache.serializers import PickleSerializer

import tree_rag
from tree_rag import adapters

from .dataclasses.api import AssistantInfo, Clarify, Explanation, Knowledge, RefuseWords, RefuseWordsExclusion, SimpleAnswerPrompt
from .dataclasses.knowledge import ALL_NODES_TYPE, Graph

# Redis connection settings, parsed once from the REDIS_HOST URL
# (e.g. "redis://host:6379"). Defaulting to "" keeps a missing env var
# from raising inside urlparse; hostname/port simply come back as None.
_redis_url = urlparse(os.getenv("REDIS_HOST", ""))
REDIS_HOST = _redis_url.hostname
REDIS_PORT = _redis_url.port
REDIS_PASSWORD = os.getenv("REDIS_PASSWORD")

# Shared Redis-backed cache, cleared by cache_clear_decorator and used
# (via @cached) by get_all_nodes below.
CACHE = Cache(
    Cache.REDIS,
    endpoint=REDIS_HOST,
    port=REDIS_PORT,
    namespace=tree_rag.APP_NAME,
    password=REDIS_PASSWORD,
)

# Dimensionality of the dense_vector embedding fields in the mappings below.
DIM_SIZE = 1024

# Index mapping for knowledge segments (tree_rag.DATASET_INDEX).
# NOTE(review): the variable name is misspelled ("maaping") but is
# referenced by index_document, so it is kept as-is.
datasets_maaping = {
    "mappings": {
        "properties": {
            "dataset_id": {"type": "keyword"},
            "segment_id": {"type": "keyword"},
            # NOTE(review): index_document actually writes this field as
            # "deshegn_id" (letters transposed), so this declared field may
            # never be populated — confirm before relying on it.
            "desheng_id": {"type": "keyword"},
            "tenant_id": {"type": "keyword"},
            "channel_id": {"type": "keyword"},
            "question": {
                "type": "text",
                "analyzer": "standard",
            },
            # Question embedding, searched via kNN with cosine similarity.
            "question_vector": {
                "type": "dense_vector",
                "dims": DIM_SIZE,
                "index": True,
                "similarity": "cosine",
            },
            "answer": {
                "type": "text",
                "analyzer": "standard",
            },
            # Stored verbatim, never searched.
            "supplementary": {
                "type": "text",
                "index": False,
            },
            # Answer embedding, searched via kNN with cosine similarity.
            "answer_vector": {
                "type": "dense_vector",
                "dims": DIM_SIZE,
                "index": True,
                "similarity": "cosine",
            },
            # Knowledge-graph labels attached to the segment; queried by
            # build_es_search_query and aggregated by get_all_nodes.
            "graph": {
                "properties": {
                    "level_1": {"type": "keyword"},
                    "level_2": {"type": "keyword"},
                    "scene": {
                        "properties": {
                            "label": {"type": "keyword"},
                            "category": {"type": "keyword"},
                        }
                    },
                    "keywords": {"type": "keyword"},
                }
            },
            # Extraction config is stored opaquely, not indexed/searchable.
            "extraction": {
                "properties": {
                    "prompt": {"type": "text", "index": False},
                    "failure_default": {"type": "object", "enabled": False},
                    "reask_words": {"type": "object", "enabled": False},
                }
            },
            # Knowledge type; values seen in queries include "goodcase",
            # "badcase" and "information".
            "ktype": {"type": "keyword"},
        }
    },
}

# Index mapping for Explanation documents (see index_explanation).
explanation_mapping = {
    "mappings": {
        "properties": {
            "dataset_id": {"type": "keyword"},
            "segment_id": {"type": "keyword"},
            "tenant_id": {"type": "keyword"},
            "channel_id": {"type": "keyword"},
            "explanation_id": {"type": "keyword"},
            "type": {"type": "keyword"},
            "label": {"type": "keyword"},
            # Explanation body and aliases are stored but not indexed.
            "explanation": {
                "type": "text",
                "index": False,
            },
            "alias": {
                "type": "keyword",
                "index": False,
            },
        }
    },
}

# Index mapping for Clarify documents (see index_clarify / query_clarify):
# a per-category list of words for a tenant/channel.
clarify_mapping = {
    "mappings": {
        "properties": {
            "dataset_id": {"type": "keyword"},
            "segment_id": {"type": "keyword"},
            "tenant_id": {"type": "keyword"},
            "channel_id": {"type": "keyword"},
            "clarify_id": {"type": "keyword"},
            "category": {"type": "keyword"},
            "words": {"type": "keyword"},
        }
    },
}

# Index mapping for RefuseWords documents (see index_refuse_words /
# query_refuse_words): one word list per tenant/channel.
refuse_mapping = {
    "mappings": {
        "properties": {
            "tenant_id": {"type": "keyword"},
            "channel_id": {"type": "keyword"},
            "refuse_id": {"type": "keyword"},
            "words": {"type": "keyword"},
        }
    },
}

# Index mapping for RefuseWordsExclusion documents (see
# index_refuse_words_exclusion / query_refuse_words_exclusion).
refuse_exclusion_mapping = {
    "mappings": {
        "properties": {
            "tenant_id": {"type": "keyword"},
            "channel_id": {"type": "keyword"},
            "refuse_exclusion_id": {"type": "keyword"},
            "category": {"type": "keyword"},
            "examples": {"type": "keyword"},
            "definition": {"type": "text"},
            "words": {"type": "keyword"},
        }
    },
}

# Index names are derived from the application name so that multiple
# deployments can share one ES cluster without colliding.
REFUSE_INDEX = tree_rag.APP_NAME + "_refuse"
REFUSE_EXCLUSION_INDEX = tree_rag.APP_NAME + "_refuse_exclusion"

ASSISTANT_INDEX = tree_rag.APP_NAME + "_assistant"
SIMPLE_ANSWER_PROMPT_INDEX = tree_rag.APP_NAME + "_simple_answer_prompt"

# Index mapping for AssistantInfo documents (see index_assistant).
assistant_mapping = {
    "mappings": {
        "properties": {
            "assistant_id": {"type": "keyword"},
            "tenant_id": {"type": "keyword"},
            "channel_id": {"type": "keyword"},
            "agent_name": {"type": "text"},
            "position": {"type": "text"},
            "service": {"type": "text"},
        }
    },
}

# Index mapping for SimpleAnswerPrompt documents; the prompt text is
# stored but not indexed.
simple_answer_prompt_mapping = {
    "mappings": {
        "properties": {
            "prompt_id": {"type": "keyword"},
            "tenant_id": {"type": "keyword"},
            "channel_id": {"type": "keyword"},
            "prompt": {"type": "text", "index": False},
        }
    },
}


# Async Elasticsearch client shared by every function in this module.
# SECURITY(review): the password was hard-coded in source; it can now be
# overridden via ES_USER / ES_PASSWORD env vars. The old literal remains
# the fallback for backward compatibility — rotate it and drop the
# default when possible.
ES_HOST = os.getenv("ES_HOST", "http://localhost:9200")
es_client = elasticsearch.AsyncElasticsearch(
    [ES_HOST],
    basic_auth=(
        os.getenv("ES_USER", "elastic"),
        os.getenv("ES_PASSWORD", "telecom12345"),
    ),
)


def cache_clear_decorator(func):
    """Decorator that flushes the shared Redis CACHE before running *func*.

    Applied to every write/delete path so the @cached get_all_nodes result
    cannot go stale after a mutation.
    """

    @wraps(func)  # preserve the wrapped coroutine's name/docstring
    async def wrapper(*args, **kwargs):
        await CACHE.clear()
        return await func(*args, **kwargs)

    return wrapper


@cache_clear_decorator
async def index_document(
    doc: "Knowledge", question_vector: List[float], answer_vector: List[float]
):
    """Index a knowledge segment together with its precomputed embeddings.

    Creates the dataset index (with datasets_maaping) on first use, then
    upserts the document keyed by segment_id with refresh=True so it is
    searchable immediately. The decorator flushes the Redis cache so the
    cached get_all_nodes tree is rebuilt.
    """
    # ignore_status tolerates racy "already exists" / missing-index responses
    if not await es_client.options(ignore_status=[400, 404]).indices.exists(
        index=tree_rag.DATASET_INDEX
    ):
        await es_client.indices.create(
            index=tree_rag.DATASET_INDEX, body=datasets_maaping
        )
    document = {
        "dataset_id": doc.dataset_id,
        "segment_id": doc.segment_id,
        "tenant_id": doc.tenant_id,
        "channel_id": doc.channel_id,
        # NOTE: field name is misspelled in the index ("deshegn_id") and the
        # readers in search()/search_information() depend on that spelling —
        # TODO rename it together with a reindex.
        "deshegn_id": doc.desheng_id,
        "question": doc.question,
        "question_vector": question_vector,
        "answer": doc.answer,
        "supplementary": doc.supplementary,
        "answer_vector": answer_vector,
        "graph": doc.graph.model_dump(),
        "extraction": doc.extraction.model_dump() if doc.extraction else None,
        "ktype": doc.ktype,
    }
    await es_client.index(
        index=tree_rag.DATASET_INDEX, body=document, id=doc.segment_id, refresh=True
    )


async def index_explanation(
    explanation: "Explanation",
):
    """Upsert an Explanation document (doc id = explanation_id),
    creating the explanation index with its mapping on first use."""
    target_index = tree_rag.EXPLANATION_INDEX
    if not await es_client.indices.exists(index=target_index):
        await es_client.indices.create(index=target_index, body=explanation_mapping)
    await es_client.index(
        index=target_index,
        body=explanation.model_dump(),
        id=explanation.explanation_id,
        refresh=True,
    )


async def index_clarify(
    clarify: "Clarify",
):
    """Upsert a Clarify document (doc id = clarify_id), creating the
    clarify index with its mapping on first use."""
    index_missing = not await es_client.indices.exists(index=tree_rag.CLARIFY_INDEX)
    if index_missing:
        await es_client.indices.create(
            index=tree_rag.CLARIFY_INDEX, body=clarify_mapping
        )
    document = clarify.model_dump()
    await es_client.index(
        index=tree_rag.CLARIFY_INDEX,
        body=document,
        id=clarify.clarify_id,
        refresh=True,
    )


async def index_refuse_words(
    refuse: "RefuseWords",
):
    """Upsert a RefuseWords document (doc id = refuse_id)."""
    exists = await es_client.indices.exists(index=REFUSE_INDEX)
    if not exists:
        await es_client.indices.create(index=REFUSE_INDEX, body=refuse_mapping)
    payload = refuse.model_dump()
    await es_client.index(
        index=REFUSE_INDEX, body=payload, id=refuse.refuse_id, refresh=True
    )


async def index_refuse_words_exclusion(
    refuse_exclusion: "RefuseWordsExclusion",
):
    """Upsert a RefuseWordsExclusion document (doc id = refuse_exclusion_id)."""
    if not await es_client.indices.exists(index=REFUSE_EXCLUSION_INDEX):
        await es_client.indices.create(
            index=REFUSE_EXCLUSION_INDEX,
            body=refuse_exclusion_mapping,
        )
    await es_client.index(
        index=REFUSE_EXCLUSION_INDEX,
        id=refuse_exclusion.refuse_exclusion_id,
        body=refuse_exclusion.model_dump(),
        refresh=True,
    )


async def index_assistant(
    assistant: "AssistantInfo",
):
    """Upsert an AssistantInfo document (doc id = assistant_id)."""
    have_index = await es_client.indices.exists(index=ASSISTANT_INDEX)
    if not have_index:
        await es_client.indices.create(index=ASSISTANT_INDEX, body=assistant_mapping)
    await es_client.index(
        index=ASSISTANT_INDEX,
        body=assistant.model_dump(),
        id=assistant.assistant_id,
        refresh=True,
    )


async def index_simple_answer_prompt(
    prompt: "SimpleAnswerPrompt",
):
    """Upsert a SimpleAnswerPrompt document (doc id = prompt_id)."""
    if not await es_client.indices.exists(index=SIMPLE_ANSWER_PROMPT_INDEX):
        await es_client.indices.create(
            index=SIMPLE_ANSWER_PROMPT_INDEX,
            body=simple_answer_prompt_mapping,
        )
    document = prompt.model_dump()
    await es_client.index(
        index=SIMPLE_ANSWER_PROMPT_INDEX,
        body=document,
        id=prompt.prompt_id,
        refresh=True,
    )


async def get_assistant_by_name(agent_name: str) -> Optional["AssistantInfo"]:
    """Look up a single assistant whose agent_name matches (full-text match).

    Returns None when the index does not exist or nothing matches.
    """
    if not await es_client.indices.exists(index=ASSISTANT_INDEX):
        return None

    response = await es_client.search(
        index=ASSISTANT_INDEX,
        query={"bool": {"must": [{"match": {"agent_name": agent_name}}]}},
        size=1,
    )
    hits = response["hits"]["hits"]
    if not hits:
        return None
    src = hits[0]["_source"]
    return AssistantInfo(
        assistant_id=src["assistant_id"],
        tenant_id=src["tenant_id"],
        channel_id=src["channel_id"],
        agent_name=src["agent_name"],
        position=src.get("position"),
        service=src.get("service"),
    )


async def get_assistant_by_channel_id(channel_id: str) -> Optional["AssistantInfo"]:
    """Look up the assistant bound to a channel (exact term match).

    Returns None when the index does not exist or nothing matches.
    """
    if not await es_client.indices.exists(index=ASSISTANT_INDEX):
        return None

    response = await es_client.search(
        index=ASSISTANT_INDEX,
        query={"bool": {"must": [{"term": {"channel_id": channel_id}}]}},
        size=1,
    )
    hits = response["hits"]["hits"]
    if not hits:
        return None
    src = hits[0]["_source"]
    return AssistantInfo(
        assistant_id=src["assistant_id"],
        tenant_id=src["tenant_id"],
        channel_id=src["channel_id"],
        agent_name=src["agent_name"],
        position=src.get("position"),
        service=src.get("service"),
    )


async def get_simple_answer_prompt_by_id(prompt_id: str) -> Optional["SimpleAnswerPrompt"]:
    """Fetch a SimpleAnswerPrompt by its document id.

    Returns None when the index does not exist or the document is absent.
    """
    if not await es_client.indices.exists(index=SIMPLE_ANSWER_PROMPT_INDEX):
        return None

    # Use .options(ignore_status=...) rather than the deprecated/removed
    # per-call ignore= kwarg (consistent with index_document).
    response = await es_client.options(ignore_status=[404]).get(
        index=SIMPLE_ANSWER_PROMPT_INDEX,
        id=prompt_id,
    )
    if not response.get("found", False):
        return None

    source = response["_source"]
    return SimpleAnswerPrompt(
        prompt_id=source["prompt_id"],
        tenant_id=source["tenant_id"],
        channel_id=source["channel_id"],
        prompt=source["prompt"],
    )


async def get_simple_answer_prompt_by_tenant_channel(tenant_id: str, channel_id: str) -> Optional["SimpleAnswerPrompt"]:
    """Return the (single) prompt configured for a tenant/channel pair.

    Returns None when the index does not exist or no prompt matches.
    """
    if not await es_client.indices.exists(index=SIMPLE_ANSWER_PROMPT_INDEX):
        return None

    response = await es_client.search(
        index=SIMPLE_ANSWER_PROMPT_INDEX,
        query={
            "bool": {
                "must": [
                    {"term": {"tenant_id": tenant_id}},
                    {"term": {"channel_id": channel_id}},
                ]
            }
        },
        size=1,
    )
    hits = response["hits"]["hits"]
    if not hits:
        return None

    src = hits[0]["_source"]
    return SimpleAnswerPrompt(
        prompt_id=src["prompt_id"],
        tenant_id=src["tenant_id"],
        channel_id=src["channel_id"],
        prompt=src["prompt"],
    )


async def query_clarify(
    tenant_id: str,
    channel_id: str,
    category: str,
) -> List[str]:
    """Return the clarify word list for a tenant/channel/category.

    Returns an empty list when the index does not exist, nothing matches,
    or the matched document has no "words" field.
    """
    # if index not exists, return empty list
    if not await es_client.indices.exists(index=tree_rag.CLARIFY_INDEX):
        return []

    query = {
        "bool": {
            "filter": [
                {"term": {"tenant_id": tenant_id}},
                {"term": {"channel_id": channel_id}},
                {"term": {"category": category}},
            ]
        }
    }
    response = await es_client.search(
        index=tree_rag.CLARIFY_INDEX,
        query=query,
        size=1,
        source_includes=["words"],
    )
    hits = response["hits"]["hits"]
    if not hits:
        return []
    # .get guards against documents missing "words" (previously a raw
    # ["words"] access could raise KeyError; now consistent with
    # query_refuse_words).
    return hits[0]["_source"].get("words", [])


async def query_refuse_words(
    tenant_id: str,
    channel_id: str,
) -> List[str]:
    """Return the refuse-word list for a tenant/channel.

    Returns an empty list when the index does not exist, nothing matches,
    or the matched document has no "words" field.
    """
    if not await es_client.indices.exists(index=REFUSE_INDEX):
        return []

    response = await es_client.search(
        index=REFUSE_INDEX,
        query={
            "bool": {
                "filter": [
                    {"term": {"tenant_id": tenant_id}},
                    {"term": {"channel_id": channel_id}},
                ]
            }
        },
        size=1,
        source_includes=["words"],
    )
    hits = response["hits"]["hits"]
    if not hits:
        return []
    return hits[0]["_source"].get("words", [])


async def query_refuse_words_exclusion(
    tenant_id: str,
    channel_id: str,
) -> List[Dict]:
    """Return every refuse-word exclusion rule for a tenant/channel.

    Each rule is a plain dict with "category", "examples", "definition"
    and "words" keys; missing fields fall back to empty values. Returns
    an empty list when the index does not exist.
    """
    if not await es_client.indices.exists(index=REFUSE_EXCLUSION_INDEX):
        return []

    filter_query = {
        "bool": {
            "filter": [
                {"term": {"tenant_id": tenant_id}},
                {"term": {"channel_id": channel_id}},
            ]
        }
    }
    response = await es_client.search(
        index=REFUSE_EXCLUSION_INDEX,
        query=filter_query,
        size=100,  # support multiple exclusion rules per tenant/channel
        source_includes=["category", "examples", "definition", "words"],
    )
    return [
        {
            "category": hit["_source"].get("category", ""),
            "examples": hit["_source"].get("examples", []),
            "definition": hit["_source"].get("definition", ""),
            "words": hit["_source"].get("words", []),
        }
        for hit in response["hits"]["hits"]
    ]


async def get_refuse_config(
    tenant_id: str,
    channel_id: str,
) -> Dict:
    """Get merged refuse-words configuration with exclusions.

    Returns {"words": [...], "exclusion": [...]}. The two lookups hit
    independent indices, so they are issued concurrently.
    """
    refuse_words, exclusions = await asyncio.gather(
        query_refuse_words(tenant_id, channel_id),
        query_refuse_words_exclusion(tenant_id, channel_id),
    )
    return {
        "words": refuse_words,
        "exclusion": exclusions,
    }


@cache_clear_decorator
async def delete(id_type: str, id: List[str]):
    """
    Delete records based on id_type and a list of ids using efficient batch operations.

    Args:
        id_type: One of "dataset", "segment", "explanation", "clarify",
            "refuse_words", "refuse_words_exclusion", "assistant",
            "simple_answer_prompt".
        id: List of IDs to delete.

    Raises:
        ValueError: for an unrecognized id_type.
    """
    if not id:
        return

    # Segments are keyed by ES _id, so they bypass delete_by_query.
    if id_type == "segment":
        if len(id) == 1:
            await es_client.delete(
                index=tree_rag.DATASET_INDEX, id=id[0], refresh=True, ignore=[400, 404]
            )
        else:
            ops = [
                {"delete": {"_index": tree_rag.DATASET_INDEX, "_id": seg_id}}
                for seg_id in id
            ]
            await es_client.bulk(operations=ops, refresh=True)
        return

    # Every other id type maps to an (index, id-field) pair for
    # delete_by_query on a terms filter.
    targets = {
        "dataset": (tree_rag.DATASET_INDEX, "dataset_id"),
        "explanation": (tree_rag.EXPLANATION_INDEX, "explanation_id"),
        "clarify": (tree_rag.CLARIFY_INDEX, "clarify_id"),
        "refuse_words": (REFUSE_INDEX, "refuse_id"),
        "refuse_words_exclusion": (REFUSE_EXCLUSION_INDEX, "refuse_exclusion_id"),
        "assistant": (ASSISTANT_INDEX, "assistant_id"),
        "simple_answer_prompt": (SIMPLE_ANSWER_PROMPT_INDEX, "prompt_id"),
    }
    if id_type not in targets:
        raise ValueError(f"Unknown id_type: {id_type}")

    index_name, id_field = targets[id_type]
    await es_client.delete_by_query(
        index=index_name,
        body={"query": {"terms": {id_field: id}}},
        refresh=True,
    )


@cached(
    cache=Cache.REDIS,
    endpoint=REDIS_HOST,
    port=REDIS_PORT,
    serializer=PickleSerializer(),
    namespace=tree_rag.APP_NAME,
    password=REDIS_PASSWORD,
)
async def get_all_nodes(tenant_id, channel_id) -> Tuple[ALL_NODES_TYPE, Dict]:
    """Collect every graph node label for a tenant/channel plus a nested tree.

    Scrolls through all dataset documents for the tenant/channel (excluding
    the "其他事项" level_1 bucket and goodcase/badcase documents), parses
    their "graph" fields and returns:

      * an ALL_NODES_TYPE with the distinct level_1/level_2 labels, scene
        categories and labels (plus a label -> category map) and keywords;
      * kg_tree: level_1 -> level_2 -> scene.category -> scene.label ->
        set of keywords. For graphs without scenes, keywords appear
        directly under level_2 mapped to empty dicts — NOTE(review): this
        mixes leaf types (set vs dict); confirm consumers handle both.

    The result is pickled into Redis keyed by the call arguments; every
    writer in this module clears that cache via cache_clear_decorator.
    """
    query = {
        "bool": {
            "filter": {
                "bool": {
                    "must": [
                        {"term": {"tenant_id": tenant_id}},
                        {"term": {"channel_id": channel_id}},
                    ],
                    "must_not": [
                        {"term": {"graph.level_1": "其他事项"}},
                        {"term": {"ktype": "goodcase"}},
                        {"term": {"ktype": "badcase"}},
                    ],
                }
            }
        }
    }
    response = await es_client.search(
        index=tree_rag.DATASET_INDEX,
        scroll="2m",
        size=1000,
        source_includes=["graph"],
        query=query,
    )
    hits = []
    # First page of scroll results
    scroll_id = response["_scroll_id"]
    cur_hits = response["hits"]["hits"]

    # Page through the remaining results
    while len(cur_hits) > 0:
        # Accumulate the current page
        hits.extend(cur_hits)

        # Fetch the next page with the scroll id
        response = await es_client.scroll(scroll_id=scroll_id, scroll="2m")

        # Advance the scroll cursor
        scroll_id = response["_scroll_id"]
        cur_hits = response["hits"]["hits"]

    # Release the scroll context server-side
    await es_client.clear_scroll(scroll_id=scroll_id)

    # Parse the graph sub-documents (documents without one are skipped)
    graphs = []
    for hit in hits:
        if "graph" not in hit["_source"]:
            continue
        graph = Graph.model_validate(hit["_source"]["graph"])
        graphs.append(graph)

    kg_tree = dict()
    all_level_1 = set()
    all_level_2 = set()
    all_scene = defaultdict(set)
    all_keywords = set()
    all_scene_category = set()
    all_scene_label = set()
    scene_label_to_category = dict()

    for graph in graphs:
        all_level_1.update(graph.level_1)
        all_level_2.update(graph.level_2)
        all_keywords.update(graph.keywords)
        for scene in graph.scene:
            all_scene[scene.category].add(scene.label)
            all_scene_category.add(scene.category)
            all_scene_label.add(scene.label)
            scene_label_to_category[scene.label] = scene.category

        # Fill in the tree: cross-product of this graph's level_1 x level_2,
        # then scenes (or raw keywords when the graph has no scenes).
        for level_1 in graph.level_1:
            if level_1 not in kg_tree:
                kg_tree[level_1] = {}
            for level_2 in graph.level_2:
                if level_2 not in kg_tree[level_1]:
                    kg_tree[level_1][level_2] = {}

                for scene in graph.scene:
                    if scene.category not in kg_tree[level_1][level_2]:
                        kg_tree[level_1][level_2][scene.category] = {}
                    if scene.label not in kg_tree[level_1][level_2][scene.category]:
                        kg_tree[level_1][level_2][scene.category][scene.label] = set()
                    kg_tree[level_1][level_2][scene.category][scene.label].update(
                        graph.keywords
                    )

                if len(graph.scene) == 0:
                    for keyword in graph.keywords:
                        if keyword not in kg_tree[level_1][level_2]:
                            kg_tree[level_1][level_2][keyword] = {}

    return ALL_NODES_TYPE(
        all_level_1=all_level_1,
        all_level_2=all_level_2,
        all_scene=all_scene,
        all_keywords=all_keywords,
        all_scene_category=all_scene_category,
        all_scene_label=all_scene_label,
        scene_label_to_category=scene_label_to_category,
    ), kg_tree


def build_es_search_query(graph: Graph, use_scene=True) -> dict:
    """Build an Elasticsearch bool-query fragment from the graph fields.

    Each populated graph dimension (level_1, level_2 and — when use_scene
    is true — scene labels) contributes one "at least one term matches"
    sub-clause to the must list.

    Returns:
        dict: {"must": [...], "minimum_should_match": 0}, meant to be
        merged into an enclosing bool query.
    """

    def any_of(term_clauses):
        # should + minimum_should_match=1 == "at least one term matches"
        return {"bool": {"should": term_clauses, "minimum_should_match": 1}}

    must_clauses = []

    if graph.level_1:
        must_clauses.append(
            any_of([{"term": {"graph.level_1": value}} for value in graph.level_1])
        )
    if graph.level_2:
        must_clauses.append(
            any_of([{"term": {"graph.level_2": value}} for value in graph.level_2])
        )
    if use_scene and graph.scene:
        must_clauses.append(
            any_of([{"term": {"graph.scene.label": s.label}} for s in graph.scene])
        )

    # TODO USE KEYWORDS
    return {
        "must": must_clauses,
        "minimum_should_match": 0,
    }


async def search(
    tenant_id: str,
    channel_id: str,
    query: str | Graph,
    mode: str = "q",  # q: question, a: answer, g: graph, gc: goodcase, bc: badcase
    size: int = 1,
    rerank_threshold: float = 0,
    search_other=False,  # whether to also search the "其他事项" (misc) level_1 bucket
    search_self_generated=True,  # whether to include self-generated ("自主生成") answers
    text_query: Optional[str] = None,  # if mode is g, this is the text query
) -> List[Knowledge]:
    """Search knowledge segments for a tenant/channel.

    Modes:
      * "g": *query* is a Graph; documents are matched by graph labels
        (build_es_search_query), retrying without scene labels when the
        first pass finds nothing. If *text_query* is given the hits are
        re-scored by the reranker; otherwise Graph.match_score is used.
      * "q"/"a"/"gc"/"bc": *query* is text; it is embedded and searched
        via kNN against the question vector ("q" and default) or the
        answer vector ("a"). "gc"/"bc" restrict to goodcase/badcase
        documents; any other mode excludes both.

    Hits whose rerank score falls below *rerank_threshold* are dropped.
    Returns Knowledge objects (embedding vectors excluded from _source).
    """
    # Base filter: tenant/channel plus optional exclusions accumulated below.
    filters = {
        "filter": {
            "bool": {
                "must": [
                    {"term": {"tenant_id": tenant_id}},
                    {"term": {"channel_id": channel_id}},
                ],
                "must_not": [],
            }
        }
    }
    if not search_other:
        filters["filter"]["bool"]["must_not"].append(
            {"term": {"graph.level_1": "其他事项"}}
        )
    if not search_self_generated:
        filters["filter"]["bool"]["must_not"].append(
            {"match_phrase": {"answer": "自主生成"}}
        )
    if mode == "g":
        # Graph mode never returns good/bad cases.
        filters["filter"]["bool"]["must_not"].extend(
            [
                {"term": {"ktype": "goodcase"}},
                {"term": {"ktype": "badcase"}},
            ]
        )
        search_query = build_es_search_query(query, use_scene=True)
        es_query = {"bool": {**filters, **search_query}}

        search_result = await es_client.search(
            index=tree_rag.DATASET_INDEX,
            query=es_query,
            size=size,
            source_excludes=["question_vector", "answer_vector"],
        )
        if len(search_result["hits"]["hits"]) == 0:
            # Fallback: relax the query by dropping the scene-label clause.
            search_query = build_es_search_query(query, use_scene=False)
            es_query = {"bool": {**filters, **search_query}}
            search_result = await es_client.search(
                index=tree_rag.DATASET_INDEX,
                query=es_query,
                size=size,
                source_excludes=["question_vector", "answer_vector"],
            )
        knowledges = []

        # Use rerank score instead of graph match score for "g" mode
        if search_result["hits"]["hits"] and text_query:
            rerank_results = await adapters.rerank(
                [hit["_source"]["question"] + "\n" + hit["_source"]["answer"] for hit in search_result["hits"]["hits"]],
                text_query,
            )
            # Restore the reranker output to input order so scores line up
            # with the hits they belong to.
            rerank_results = sorted(rerank_results, key=lambda x: x.index, reverse=False)
            scores = [item.relevance_score for item in rerank_results]

            # Iterate best-first; since scores are descending, the first
            # sub-threshold score ends the loop (break, not continue).
            for hit, score in sorted(
                zip(search_result["hits"]["hits"], scores),
                key=lambda x: x[1],
                reverse=True,
            ):
                if score < rerank_threshold:
                    break
                graph = Graph.model_validate(hit["_source"]["graph"])
                knowledge = Knowledge(
                    dataset_id=hit["_source"]["dataset_id"],
                    segment_id=hit["_source"]["segment_id"],
                    tenant_id=hit["_source"]["tenant_id"],
                    channel_id=hit["_source"]["channel_id"],
                    desheng_id=hit["_source"]["deshegn_id"],  # stored field name is misspelled — TODO rename with a reindex
                    question=hit["_source"]["question"],
                    answer=hit["_source"]["answer"],
                    ktype=hit["_source"].get("ktype", None),
                    extraction=hit["_source"].get("extraction", None),
                    supplementary=hit["_source"].get("supplementary", None),
                    graph=graph,
                    matched_score=score,
                )
                knowledges.append(knowledge)
            knowledges = sorted(knowledges, key=lambda x: x.matched_score, reverse=True)
        else:
            # Fallback to original behavior if no text_query provided
            for hit in search_result["hits"]["hits"]:
                graph = Graph.model_validate(hit["_source"]["graph"])
                knowledge = Knowledge(
                    dataset_id=hit["_source"]["dataset_id"],
                    segment_id=hit["_source"]["segment_id"],
                    tenant_id=hit["_source"]["tenant_id"],
                    channel_id=hit["_source"]["channel_id"],
                    desheng_id=hit["_source"]["deshegn_id"],  # stored field name is misspelled — TODO rename with a reindex
                    question=hit["_source"]["question"],
                    answer=hit["_source"]["answer"],
                    ktype=hit["_source"].get("ktype", None),
                    extraction=hit["_source"].get("extraction", None),
                    supplementary=hit["_source"].get("supplementary", None),
                    graph=graph,
                    matched_score=query.match_score(graph),
                )
                knowledges.append(knowledge)

    else:
        if mode == "gc":
            filters["filter"]["bool"]["must"].append({"term": {"ktype": "goodcase"}})
        elif mode == "bc":
            filters["filter"]["bool"]["must"].append({"term": {"ktype": "badcase"}})
        else:
            filters["filter"]["bool"]["must_not"].extend(
                [
                    {"term": {"ktype": "goodcase"}},
                    {"term": {"ktype": "badcase"}},
                ]
            )
        # search_query = {"must": {"match_all": {}}}
        embedding: List[float] = await adapters.embedding(query)
        # Modified knn query
        knn = {
            "field": "answer_vector" if mode == "a" else "question_vector",
            "query_vector": embedding,
            "k": size,  # Set k to size directly
            "num_candidates": max(
                50, size * 2
            ),  # Ensure num_candidates is larger than k
            "filter": filters["filter"],
        }

        search_result = await es_client.search(
            index=tree_rag.DATASET_INDEX,  # Add index parameter
            knn=knn,
            # rank={"rrf": {}},
            size=size,
            source_excludes=["question_vector", "answer_vector"],
        )
        knowledges = []
        if len(search_result["hits"]["hits"]) > 0:
            # Re-score kNN hits with the reranker against the raw text query.
            column = "answer" if mode == "a" else "question"
            rerank_results = await adapters.rerank(
                [hit["_source"][column] for hit in search_result["hits"]["hits"]], query
            )
            scores = [item.relevance_score for item in rerank_results]
            for hit, score in zip(search_result["hits"]["hits"], scores):
                if score < rerank_threshold:
                    continue
                knowledge = Knowledge(
                    dataset_id=hit["_source"]["dataset_id"],
                    segment_id=hit["_source"]["segment_id"],
                    desheng_id=hit["_source"]["deshegn_id"],  # stored field name is misspelled — TODO rename with a reindex
                    tenant_id=hit["_source"]["tenant_id"],
                    channel_id=hit["_source"]["channel_id"],
                    question=hit["_source"]["question"],
                    answer=hit["_source"]["answer"],
                    ktype=hit["_source"].get("ktype", None),
                    extraction=hit["_source"].get("extraction", None),
                    supplementary=hit["_source"].get("supplementary", None),
                    graph=Graph.model_validate(hit["_source"]["graph"]),
                    matched_score=score,
                )
                knowledges.append(knowledge)
    return knowledges


async def search_information(
    query: str, level_1: str, tenant_id: str, channel_id: str, size: int, mode="q"
) -> List[Knowledge]:
    """kNN search restricted to ktype == "information" under one level_1 node.

    Embeds *query*, runs an approximate kNN search against the question
    (or, when mode == "a", the answer) vectors and returns the hits as
    Knowledge objects scored with the raw ES score.
    """
    knn_filter = {
        "bool": {
            "must": [
                {"term": {"tenant_id": tenant_id}},
                {"term": {"channel_id": channel_id}},
                {"term": {"ktype": "information"}},
                {"term": {"graph.level_1": level_1}},
            ],
        }
    }
    query_vector: List[float] = await adapters.embedding(query)
    vector_field = "answer_vector" if mode == "a" else "question_vector"
    response = await es_client.search(
        index=tree_rag.DATASET_INDEX,
        knn={
            "field": vector_field,
            "query_vector": query_vector,
            "k": size,
            "num_candidates": max(50, size * 2),  # keep candidate pool >= k
            "filter": knn_filter,
        },
        size=size,
        source_excludes=["question_vector", "answer_vector"],
    )
    results: List[Knowledge] = []
    for hit in response["hits"]["hits"]:
        src = hit["_source"]
        results.append(
            Knowledge(
                dataset_id=src["dataset_id"],
                segment_id=src["segment_id"],
                desheng_id=src["deshegn_id"],  # TODO Rename it
                tenant_id=src["tenant_id"],
                channel_id=src["channel_id"],
                question=src["question"],
                answer=src["answer"],
                ktype=src.get("ktype", None),
                extraction=src.get("extraction", None),
                supplementary=src.get("supplementary", None),
                graph=Graph.model_validate(src["graph"]),
                matched_score=hit["_score"],
            )
        )
    return results


@cache_clear_decorator
async def clear_all():
    """Drop every index managed by this module and flush the Redis cache.

    Fix: previously the refuse-exclusion, assistant and simple-answer-prompt
    indices were left behind; all managed indices are now deleted.
    """
    for index_name in (
        tree_rag.DATASET_INDEX,
        tree_rag.EXPLANATION_INDEX,
        tree_rag.CLARIFY_INDEX,
        REFUSE_INDEX,
        REFUSE_EXCLUSION_INDEX,
        ASSISTANT_INDEX,
        SIMPLE_ANSWER_PROMPT_INDEX,
    ):
        # Tolerate already-missing indices
        await es_client.indices.delete(index=index_name, ignore=[400, 404])


async def clear_cache():
    """Flush the shared Redis cache (e.g. after out-of-band data changes)."""
    await CACHE.clear()