import lazyllm
from lazyllm import (
    fc_register, Document, Retriever, 
    OnlineEmbeddingModule, OnlineChatModule, WebModule,
    ReactAgent,Reranker,SentenceSplitter,pipeline
)
# OpenAI-compatible endpoint for Alibaba DashScope (Qwen) online models.
base_url_http = "https://dashscope.aliyuncs.com/compatible-mode"
# Local folder of documents to index for retrieval (Windows path).
doc_path="F:/file/my/workplace/agentLazyLLM/lazyllm-venv/docs"


# NOTE(review): this module is created with type="rerank" yet passed to
# Document as `embed=` below — a reranker is not an embedding model, and
# "qwen-plus" is a chat-model name, not an embedding/rerank model. Verify
# against the lazyllm OnlineEmbeddingModule docs; the intent was presumably
# type="embed" with a real embedding model. The BM25 retriever used later
# does not consult embeddings, so this mismatch may currently go unnoticed.
online_rerank = OnlineEmbeddingModule(source='qwen',type="rerank",model="qwen-plus")
# Document indexes doc_path; `embed` is only exercised by vector-similarity
# retrieval (see the NOTE above).
doc = Document(dataset_path=doc_path,embed=online_rerank)

# System prompt (Chinese): "You play an AI — witty, humorous, and patient."
prompt = '你扮演一个AI,风趣幽默，有耐性。'

# Disabled experiment: sentence-split the corpus into 1024-char "block" nodes
# and child 128-char "line" nodes for multi-granularity retrieval.
# doc.create_node_group(name="block", transform=SentenceSplitter, chunk_size=1024, chunk_overlap=100)
# doc.create_node_group(name="line", transform=SentenceSplitter, chunk_size=128, chunk_overlap=20, parent="block")

# RAG pipeline: retrieve relevant document chunks, merge them with the user
# query into a prompt, then ask the online chat model.
with pipeline() as ppl:
    # Top-6 coarse chunks ranked by Chinese-tokenized BM25 (keyword match; no
    # embedding lookup at query time). Attribute renamed from the original
    # typo "retriver" — nothing referenced the old name.
    ppl.retriever = Retriever(doc, group_name='CoarseChunk', similarity="bm25_chinese", topk=6)
    # Disabled earlier experiments: cosine retrieval over custom
    # "block"/"line" node groups, parallel retrievers summed together,
    # and a ModuleReranker stage before the formatter.
    #
    # Merge the retrieved context with the original user query; bind feeds
    # the pipeline's raw input (the query) in alongside the previous stage's
    # output (the retrieved nodes).
    # BUG FIX: `bind` was never imported (it is not in the
    # `from lazyllm import (...)` list), so the original bare `bind(...)`
    # raised NameError at import time; qualify it via the `lazyllm` module.
    ppl.formatter = (lambda context, query: dict(context_str=str(context), query=query)) | lazyllm.bind(query=ppl.input)
    # The prompter exposes "context_str" as an extra slot filled by the
    # formatter's dict.
    useprompt = lazyllm.ChatPrompter(prompt, extra_keys=["context_str"])
    # Non-streaming Qwen chat model against the DashScope-compatible endpoint.
    ppl.llm = OnlineChatModule(source='qwen', stream=False, base_url=base_url_http).prompt(useprompt)

# ppl.start()
# print(ppl)
# query = input("输入问题\n")    
# res = ppl(query)
# print(f'回答: {res}')


# Simple REPL: read a query, run it through the RAG pipeline, print the answer.
while True:
    try:
        query = input("query(enter 'quit' to exit): ")
    except (EOFError, KeyboardInterrupt):
        # Ctrl-D / Ctrl-C (or exhausted piped stdin) ends the loop cleanly
        # instead of crashing with a traceback.
        print()
        break
    # Accept "quit" in any letter case and ignore surrounding whitespace;
    # a plain "quit" still exits exactly as before.
    if query.strip().lower() == "quit":
        break
    if not query.strip():
        continue  # don't send empty input to the LLM
    res = ppl(query)
    print(f"answer: {res}")