import json
from threading import Event
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.core.indices.query.query_transform import HyDEQueryTransform
from llama_index.core.query_engine import RetrieverQueryEngine, TransformQueryEngine
from llama_index.core.response_synthesizers import CompactAndRefine
from llama_index.core.retrievers import QueryFusionRetriever
from llama_index.core.vector_stores import MetadataFilters, MetadataFilter
from starlette.websockets import WebSocket

from aimodel import tongyi_model
from src.agents.llama_index_models import embed_model, llm
from src.agents.load_document import vector_store
from src.agents.prompt import qa_template
from src.tools.extract_jsonstring import re_json_string_list, re_json_string
from websocket_schemas import ChatResponse
from src.tools.chain import gen_chain
from src.tools.dms_operator import dmsoperator


async def query(websocket: WebSocket, user_msg: str, knowledge_base_ids, user_id, stop_event: Event):
    """Answer ``user_msg`` over the user's permitted knowledge-base files and
    stream the answer plus source annotations to ``websocket``.

    Flow: permission-filter documents -> hybrid (dense + sparse) retrieval ->
    stream the synthesized answer -> annotate which retrieved chunks the answer
    came from -> recommend three follow-up questions -> send an "end" frame.

    Args:
        websocket: open websocket that all ChatResponse frames are pushed to.
        user_msg: the user's question.
        knowledge_base_ids: knowledge bases to search within.
        user_id: used for the document-permission lookup.
        stop_event: cooperative cancellation flag; when set it is cleared,
            an "end" frame is sent and the handler returns early.
    """
    # Collect the ids of non-deleted files this user is allowed to read.
    permissions_files = dmsoperator.get_permissions_files(user_id, knowledge_base_ids)
    belongfile_ids = [
        fileinfo["belongDocumentId"]
        for fileinfo in permissions_files
        if not fileinfo["isDelete"]
    ]

    query_engine = _build_query_engine(belongfile_ids)

    # Honor a stop request both before and right after the (potentially slow)
    # query is issued, so the client is never left without an "end" frame.
    if await _stop_requested(websocket, stop_event):
        return
    streaming_response = await query_engine.aquery(user_msg)
    if await _stop_requested(websocket, stop_event):
        return

    total_text = ""
    async for text in streaming_response.response_gen:
        if await _stop_requested(websocket, stop_event):
            return
        if "Empty" in text:
            # Retrieval found nothing after filtering; send a fixed answer.
            print(f"过滤后没有知识库，返回空回答{text}")
            total_text = "未找到答案"
            msg = ChatResponse(sender="bot", message="未找到答案", type="stream")
            await websocket.send_json(msg.dict())
            break
        msg = ChatResponse(sender="bot", message=str(text), type="stream")
        total_text = total_text + str(text)
        await websocket.send_json(msg.dict())

    # Annotate the answer with its source chunks.
    source_nodes, node_textlist = _collect_source_nodes(streaming_response)
    print(f"来源消息：{source_nodes}")
    print(f"来源消息多少：{len(source_nodes)}")

    send_nodes = _select_related_sources(total_text, source_nodes, node_textlist)
    send_nodes_new = _group_sources_by_kg(send_nodes)
    print(f"发送来源消息：{send_nodes_new}")
    print(f"发送来源消息多少：{len(send_nodes_new)}")
    msg = ChatResponse(sender="bot", message="", type="stream", source_nodes=send_nodes_new)
    await websocket.send_json(msg.dict())

    # Recommend follow-up questions based on the question/answer pair.
    related = _recommend_questions(user_msg, total_text)
    msg = ChatResponse(sender="bot", message="", type="stream", related_question=related)
    await websocket.send_json(msg.dict())

    end_resp = ChatResponse(sender="bot", message="", type="end")
    await websocket.send_json(end_resp.dict())


async def _stop_requested(websocket: WebSocket, stop_event: Event) -> bool:
    """If the stop event is set: clear it, send the "end" frame, return True."""
    if not stop_event.is_set():
        return False
    stop_event.clear()
    end_resp = ChatResponse(sender="bot", message="", type="end")
    await websocket.send_json(end_resp.dict())
    return True


def _build_query_engine(belongfile_ids):
    """Build a streaming hybrid (dense + sparse fusion) query engine whose
    retrieval is restricted to the given ``belong_file_id`` values."""
    hybrid_index = VectorStoreIndex.from_vector_store(vector_store=vector_store, embed_model=embed_model)
    # OR-combine per-file filters so any permitted file may match.
    filters = MetadataFilters(
        filters=[
            MetadataFilter(key="belong_file_id", value=f"{belongfile_id}")
            for belongfile_id in belongfile_ids
        ],
        condition="or",
    )
    print(f"过滤：{filters}")

    vector_retriever = hybrid_index.as_retriever(
        vector_store_query_mode="default",
        similarity_top_k=5,
        filters=filters,
    )
    text_retriever = hybrid_index.as_retriever(
        vector_store_query_mode="sparse",
        similarity_top_k=5,
        filters=filters,
    )
    retriever = QueryFusionRetriever(
        [vector_retriever, text_retriever],
        similarity_top_k=5,
        num_queries=1,
        mode="relative_score",
        use_async=True,
        llm=llm,
    )
    response_synthesizer = CompactAndRefine(streaming=True, llm=llm, text_qa_template=qa_template)
    return RetrieverQueryEngine(
        retriever=retriever,
        response_synthesizer=response_synthesizer,
    )


def _collect_source_nodes(streaming_response):
    """Extract (source_nodes, node_textlist) from the response's source nodes.

    Returns a list of {"kg", "meta", "text"} dicts plus a parallel list of
    just the chunk texts (used to prompt the LLM for provenance).
    """
    source_nodes = []
    node_textlist = []
    for node in streaming_response.source_nodes:
        meta = node.metadata
        text = node.text
        node_textlist.append(text)
        # NOTE(review): assumes every node carries "knowledge_name" metadata;
        # previously commented-out fallback code hints this can be absent —
        # confirm against the ingestion pipeline.
        source_nodes.append({"kg": meta["knowledge_name"], "meta": meta, "text": text})
    return source_nodes, node_textlist


def _select_related_sources(total_text, source_nodes, node_textlist):
    """Ask the LLM which chunks ``total_text`` came from; return those entries.

    Falls back to the top-ranked node (if any) when the LLM reply is missing
    or unusable, instead of crashing on malformed output or an empty node
    list (the old bare-except fallback raised IndexError when no nodes were
    retrieved).
    """
    node_textjson = json.dumps(node_textlist, ensure_ascii=False)
    prompt_related = (
        "你是一位擅长寻找文本内容来源的语言专家和计算机专家：\n"
        "以下是一个json列表：\n"
        f"'''json\n{node_textjson}'''\n"
        f"现在请你找到'{total_text}'来源于列表中哪些元素，给出如下示例，其中position代表最外层列表元素的索引。按照示例格式返回,不要返回多余的内容：\n"
        '{"position": [0, 1, 2]}\n'
        "如果未找到来源就直接返回None"
    )
    print(f"生成来源消息的提示：{prompt_related}")
    related_nodes = re_json_string(gen_chain(prompt_related))
    print(f"生成来源消息：{related_nodes}")
    if related_nodes is None:
        related_nodes = '{"position": [0]}'  # default to the top-ranked node
    try:
        positions = json.loads(related_nodes)["position"]
        return [source_nodes[i] for i in positions]
    except (json.JSONDecodeError, KeyError, TypeError, IndexError):
        # LLM output unusable; best-effort fallback to the top-ranked node
        # (empty when nothing was retrieved at all).
        return source_nodes[:1]


def _group_sources_by_kg(send_nodes):
    """Group selected sources by knowledge base, de-duplicating files by
    ``file_id`` within each group; returns [{"kg", "fileinfos"}] frames."""
    merged_data = {}
    for item in send_nodes:
        kg = item["kg"]
        meta = item["meta"]
        meta_id = meta["file_id"]  # de-duplication key within a knowledge base
        group = merged_data.setdefault(kg, {"meta_list": [], "meta_ids": set()})
        if meta_id not in group["meta_ids"]:
            group["meta_list"].append(meta)
            group["meta_ids"].add(meta_id)
    send_nodes_new = [
        {"kg": kg, "fileinfos": group["meta_list"]} for kg, group in merged_data.items()
    ]
    print(send_nodes_new)
    return send_nodes_new


def _recommend_questions(user_msg, total_text):
    """Ask the LLM for three follow-up questions; returns a list of strings.

    Falls back to the placeholder questions when the JSON list cannot be
    extracted from the model output (the old code passed None to json.loads
    and crashed).
    """
    questions = ["推荐问题1", "推荐问题2", "推荐问题3"]
    res = tongyi_model.invoke(
        f"请根据用户问题和回答推荐三个相关问题\n用户问题：\n{user_msg}\n用户回答:\n{total_text}\n回答格式如下\n```json\n{json.dumps(questions, ensure_ascii=False)}\n```"
    )
    print(res.content)
    json_res = re_json_string_list(res.content)
    if json_res is None:
        return questions  # extraction failed; use the placeholders
    return json.loads(json_res)
