from langchain_text_splitters import TokenTextSplitter
from src.gen_file.prompt import SOPPrompt
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
from aimodel import tongyi_model

# Module-level SOP prompt builder; currently only referenced by the
# commented-out chunking logic below — kept for when that path is re-enabled.
sop_prompt = SOPPrompt()


# def gen_chain_sop(query: str):
#     chain = RunnablePassthrough() | tongyi_model | StrOutputParser()
#     return chain.invoke(query)

def handle_long_text(query, text, chunk_size=2000, chunk_overlap=500):
    """Ask the LLM to locate the *query*-specific process info inside *text*.

    Builds a Chinese extraction prompt around the full article, streams the
    model's answer to stdout token by token, and returns the complete answer.

    Args:
        query: Name of the production line / item whose specific details
            should be located (e.g. "B线氯化钠500ml:4.5g").
        text: Full article text to search through.
        chunk_size: Reserved for a future token-splitting path; currently
            unused (kept for interface compatibility).
        chunk_overlap: Reserved alongside ``chunk_size``; currently unused.

    Returns:
        The full streamed model response as a single string. (Previously the
        response was only printed and discarded, so callers could not use it;
        returning it is backward-compatible.)
    """
    prompt = (
        "你是一位擅长摘抄文章的员工。以下是一篇文章：\n"
        f"{text}\n"
        "文章中有各条线的工艺信息，这些信息有的是所有线共有的，有的是每条线特有的，现在请您将"
        f"{query}"
        "的特有的详细信息从文章中找出。返回开头和结束的位置段落。给出如下示例，按照示例格式返回:\n"
        '[{"start_position": "开头", "end_position": "结束"}, {"start_position": "开头", "end_position": "结束"},]\n'
        "start_position代表找到信息的开头位置, end_position代表找到信息的结束位置。"
    )
    chain = RunnablePassthrough() | tongyi_model | StrOutputParser()
    # Stream for interactive feedback, but also collect the tokens so the
    # assembled answer can be returned to the caller.
    pieces = []
    for token in chain.stream(prompt):
        print(token, end="")
        pieces.append(token)
    return "".join(pieces)


if __name__ == "__main__":
    import time
    start = time.time()
    from src.tools.dms_operator import dmsoperator
    pitext = dmsoperator.get_fileconversion(
        "Files\\202411\\20241104084040kagk56eF16029bf0c615b24a981fa3539cd9fd774d.docx", 4).replace(
        'style="text-align: center;"', "").replace('style="text-align: left;"', "").replace(
        'style="text-align: right;"', "")
    print(pitext)
    handle_long_text("B线氯化钠500ml:4.5g", pitext)
    end = time.time()
    print(end - start)



