import lazyllm
from lazyllm import (
    Document,
    OnlineChatModule,
    OnlineEmbeddingModule,
    ReactAgent,
    Reranker,
    Retriever,
    SentenceSplitter,
    WebModule,
    bind,
    fc_register,
    pipeline,
)
# Qwen (DashScope) OpenAI-compatible endpoint, used by OnlineChatModule below.
base_url_http = "https://dashscope.aliyuncs.com/compatible-mode"
# Local folder holding the knowledge-base documents for the RAG pipeline.
doc_path="F:/file/my/workplace/agentLazyLLM/lazyllm-venv/docs"


# Embedding model used to index/retrieve documents by vector (cosine) similarity.
# NOTE(review): the original passed the rerank-type module as `embed=`, but a
# rerank endpoint scores (query, doc) pairs and cannot produce vectors, so
# retrieval would fail. Keep the two roles separate, as the earlier commented
# version of this script did.
embed_model = OnlineEmbeddingModule(source="qwen")

# Online reranker, consumed later by the pipeline's Reranker stage.
# `model` is left at LazyLLM's default rerank model; 'text-embedding-v4' is an
# embedding model name and does not match type="rerank".
online_rerank = OnlineEmbeddingModule(source='qwen', stream=False, type="rerank")

doc = Document(dataset_path=doc_path, embed=embed_model)

# Two-level chunking: coarse 1024-char "block" nodes, plus fine 128-char "line"
# nodes whose parent is the block group (so line hits can resolve to blocks).
doc.create_node_group(name="block", transform=SentenceSplitter, chunk_size=1024, chunk_overlap=100)
doc.create_node_group(name="line", transform=SentenceSplitter, chunk_size=128, chunk_overlap=20, parent="block")

# System prompt for the chat model (runtime string — kept verbatim).
prompt = '你扮演一个AI,风趣幽默，有耐性。'

with pipeline() as ppl:
    # Fan the query out to two retrievers and merge (sum) their node lists.
    with lazyllm.parallel().sum as ppl.prl:
        # Fine-grained match on 'line' chunks, returning their parent 'block's.
        # (was bare `prl.r1` / `prl.r2` — a NameError; must go through ppl.prl)
        ppl.prl.r1 = Retriever(doc, group_name='line', similarity="cosine", topk=6, target='block')
        ppl.prl.r2 = Retriever(doc, group_name='block', similarity="cosine", topk=6)
    # Rerank the merged nodes against the original query and keep the top 3 as
    # joined plain text. (was `top=3` — Reranker's keyword is `topk`.)
    ppl.reranker = Reranker('ModuleReranker', model=online_rerank, output_format='content',
                            join=True, topk=3) | bind(query=ppl.input)
    # Pack reranked context plus the original query into the prompt's slots.
    ppl.formatter = (lambda context, query: dict(context_str=str(context), query=query)) | bind(query=ppl.input)
    useprompt = lazyllm.ChatPrompter(prompt, extra_keys=["context_str"])
    ppl.llm = OnlineChatModule(source='qwen', stream=False, base_url=base_url_http).prompt(useprompt)
# ppl.start()

# agent = lazyllm.OnlineChatModule(
#     source='qwen',
#     base_url=base_url_http,
#     # tools=['search_knowledge_base'],
#     prompt=prompt,
#     # stream=False
# )
# Read a single question from stdin, run it through the RAG pipeline, and
# print the model's answer.
question = input("输入问题\n")
answer = ppl(question)
print(f'回答: {answer}')
  


# def search_knowledge_base(query:str):
#     doc_node_list = retriever(query=query)
#     context_str = "".join([node.get_content() for node in doc_node_list])
#     return context_str

# prompt = "什么是好的运营"
# base_url_http = "https://dashscope.aliyuncs.com/compatible-mode"
# agent = lazyllm.OnlineChatModule(
#     source='qwen',
#     base_url=base_url_http,
#     tools=['search_knowledge_base'],
#     prompt=prompt,
#     stream=False
# )
# w  = WebModule(agent,stream=False)
# w.start().wait()





# while True:
#     query = input("query(enter 'quit' to exit): ")
#     if query == "quit":
#         break
#     res = chat.forward(query)
#     print(f"answer: {res}")     