from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.core.indices.query.query_transform import HyDEQueryTransform
from llama_index.core.query_engine import RetrieverQueryEngine, TransformQueryEngine
from llama_index.core.response_synthesizers import CompactAndRefine
from llama_index.core.retrievers import QueryFusionRetriever
from llama_index.core.vector_stores import MetadataFilters, MetadataFilter
from starlette.websockets import WebSocket

from aimodel import tongyi_model
from src.agents.llama_index_models import embed_model, llm
from src.agents.load_document import vector_store
from src.agents.prompt import qa_template
from websocket_schemas import ChatResponse


async def query(websocket: WebSocket, user_msg: str, file_ids):
    """Answer *user_msg* with hybrid (dense + sparse) RAG and stream the result.

    Builds a fusion retriever over the shared ``vector_store``, wraps it with a
    HyDE query transform, then streams the synthesized answer token-by-token to
    the client as ``ChatResponse`` JSON frames, followed by one frame with the
    source nodes and one with suggested follow-up questions.

    Args:
        websocket: Open WebSocket the response frames are sent over.
        user_msg: The user's question.
        file_ids: Intended to scope retrieval to specific files; currently
            unused — TODO(review): wire into ``MetadataFilters`` and pass
            ``filters=...`` to the retrievers below.
    """
    hybrid_index = VectorStoreIndex.from_vector_store(
        vector_store=vector_store, embed_model=embed_model
    )

    # Dense (embedding-similarity) retriever.
    vector_retriever = hybrid_index.as_retriever(
        vector_store_query_mode="default",
        similarity_top_k=5,
    )
    # Sparse (keyword-style) retriever over the same index.
    text_retriever = hybrid_index.as_retriever(
        vector_store_query_mode="sparse",
        similarity_top_k=5,
    )

    # Fuse dense + sparse results, re-ranking by relative score.
    retriever = QueryFusionRetriever(
        [vector_retriever, text_retriever],
        similarity_top_k=5,
        num_queries=1,  # 1 disables LLM-based query generation
        mode="relative_score",
        use_async=True,
        llm=llm,
    )

    response_synthesizer = CompactAndRefine(
        streaming=True, llm=llm, text_qa_template=qa_template
    )
    query_engine = RetrieverQueryEngine(
        retriever=retriever,
        response_synthesizer=response_synthesizer,
    )

    # HyDE: generate a hypothetical answer document to improve retrieval;
    # include_original keeps the raw query alongside the hypothetical one.
    hyde = HyDEQueryTransform(include_original=True, llm=llm)
    hyde_query_engine = TransformQueryEngine(query_engine, hyde)

    streaming_response = await hyde_query_engine.aquery(user_msg)

    # Stream answer tokens to the client as they are generated.
    async for token in streaming_response.response_gen:
        msg = ChatResponse(sender="bot", message=str(token), type="stream")
        await websocket.send_json(msg.dict())

    # Annotate source provenance for the answer.
    # TODO(review): file_id / kg are hard-coded placeholders — replace with
    # real metadata (e.g. derived from file_ids) once available.
    source_nodes = []
    for node in streaming_response.source_nodes:
        meta = node.metadata
        meta["file_id"] = "11111aaa"
        meta["kg"] = "测试知识库"
        source_nodes.append({"meta": meta, "text": node.text})

    msg = ChatResponse(sender="bot", message="", type="stream", source_nodes=source_nodes)
    await websocket.send_json(msg.dict())

    # Send suggested follow-up questions (placeholder content for now).
    questions = ["推荐问题1", "推荐问题2", "推荐问题3"]
    msg = ChatResponse(sender="bot", message="", type="stream", related_question=questions)
    await websocket.send_json(msg.dict())
