import logging
import os

from langchain.chat_models import init_chat_model
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_text_splitters import RecursiveCharacterTextSplitter


async def rag_query_by_file(user_input: str, file_path: str = "D:/python_adress/test.txt") -> str:
    """Answer a question with single-shot RAG over one local text file.

    Loads *file_path*, splits it into small overlapping chunks, embeds the
    chunks into an in-memory vector store, retrieves the chunks most similar
    to *user_input*, and asks the chat model to answer using only those
    chunks as context.

    Args:
        user_input: The user's question.
        file_path: UTF-8 text file used as the knowledge base. Defaults to
            the original hard-coded path for backward compatibility.

    Returns:
        The model's answer text.
    """
    import os  # local import so this edit stands alone even without the deps update

    # SECURITY: API keys must come from the environment, never from source.
    model = init_chat_model(
        model="deepseek-v3",
        model_provider="openai",
        api_key=os.getenv("DASHSCOPE_API_KEY", ""),
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        temperature=0.5,
        max_tokens=800,
    )

    loader = TextLoader(file_path, encoding="utf-8")
    docs = loader.load()
    logging.info(f"文件加载后内容是{docs}")
    # Very small chunks (50 chars) with heavy overlap — tuned for short test files.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=50, chunk_overlap=20)
    chunks = text_splitter.split_documents(docs)

    embeddings = DashScopeEmbeddings(
        model="text-embedding-v3",
        dashscope_api_key=os.getenv("DASHSCOPE_API_KEY", ""),
    )

    vector_store = InMemoryVectorStore.from_documents(embedding=embeddings, documents=chunks)
    retriever = vector_store.as_retriever()
    retriever_chunks = retriever.invoke(user_input)
    logging.info(f"检索出来的内容是{retriever_chunks}")

    # BUGFIX: previously the raw list was interpolated into the prompt, so the
    # model saw Python list repr (brackets/quotes). Join the chunk texts instead.
    chunk_text = "\n\n".join(doc.page_content for doc in retriever_chunks)

    prompt_content = """
        你是一个智能助手，你的任务是根据用户的问题，从文本片段中找出相关内容并回答

        ### 文本片段
        {chunks}
        ###
        """

    prompt_template = ChatPromptTemplate.from_messages(
        [
            ("system", prompt_content),
            ("human", "请回答以下问题{question}"),
        ]
    )

    messages = prompt_template.invoke(
        {
            "chunks": chunk_text,
            "question": user_input,
        }
    )

    response = model.invoke(input=messages)
    logging.info(f"本轮的思考内容结果{response.content}")

    return response.content











async def rag_query_map_reduce(user_input: str, file_path: str = "D:/python_adress/test.txt") -> str:
    """Answer a question with map-reduce RAG over one local text file.

    Map phase: each retrieved chunk is sent to the model independently to
    produce a partial answer. Reduce phase: the partial answers are combined
    into one final summarizing prompt.

    Args:
        user_input: The user's question.
        file_path: UTF-8 text file used as the knowledge base. Defaults to
            the original hard-coded path for backward compatibility.

    Returns:
        The model's final summarized answer text.
    """
    import os  # local import so this edit stands alone even without the deps update

    # SECURITY: API keys must come from the environment, never from source.
    model = init_chat_model(
        model="deepseek-v3",
        model_provider="openai",
        api_key=os.getenv("DASHSCOPE_API_KEY", ""),
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        temperature=0.5,
        max_tokens=800,
    )

    loader = TextLoader(file_path, encoding="utf-8")
    docs = loader.load()
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=50)
    chunks = text_splitter.split_documents(docs)

    embeddings = DashScopeEmbeddings(
        model="text-embedding-v3",
        dashscope_api_key=os.getenv("DASHSCOPE_API_KEY", ""),
    )

    vector_store = InMemoryVectorStore.from_documents(embedding=embeddings, documents=chunks)
    retriever = vector_store.as_retriever()
    retrieved_chunks = retriever.invoke(user_input)

    # Map phase: one model call per retrieved chunk.
    partial_answers = []
    for chunk in retrieved_chunks:
        prompt = f"""
        你是一个智能助手，下面是一个文档片段，请根据它回答用户的问题：

        文本片段：
        {chunk.page_content}

        用户问题：{user_input}
        """
        response = model.invoke(prompt)
        partial_answers.append(response.content)

    # Reduce phase: summarize the partial answers into one final answer.
    # BUGFIX: join with newlines — the previous "".join mashed the answers
    # together ("回答1：...回答2：...") with no separator.
    combined = "\n".join(f"回答{i + 1}：{ans}" for i, ans in enumerate(partial_answers))
    final_prompt = f"""
    以下是多个文档片段回答用户问题的内容，请总结出最终答案：

    {combined}

    用户问题：{user_input}
    """

    final_response = model.invoke(final_prompt)
    return final_response.content
