import logging
from http.client import responses

from click import prompt
from langchain.chains.summarize.refine_prompts import prompt_template
from langchain.chat_models import init_chat_model
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_core.messages import AIMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.stores import InMemoryStore
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_text_splitters import RecursiveCharacterTextSplitter

from entity.schema.rag_schema import RagInput


async def rag_query_by_file(api_key: str, user_input: str,
                            file_path: str = "D:/hbut/fastbase/小米yu7.txt") -> str:
    """Answer a user question with RAG over a local text file.

    Loads the file, splits it into overlapping chunks, embeds the chunks into
    an in-memory vector store, retrieves the chunks most relevant to
    ``user_input``, and asks the chat model to answer using only those chunks.

    Args:
        api_key: DashScope API key, used for both the chat model and the
            embedding model.
        user_input: The user's question.
        file_path: Path of the text file to ground the answer on. Defaults to
            the original hard-coded document for backward compatibility.

    Returns:
        The model's answer text, or an empty string when retrieval finds no
        relevant chunks (previously this case crashed with UnboundLocalError).
    """
    model = init_chat_model(model="deepseek-v3",
                            model_provider="openai",
                            api_key=api_key,
                            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
                            temperature=0.5,
                            max_tokens=200
                            )

    loader = TextLoader(file_path, encoding="utf-8")
    docs = loader.load()

    text_split = RecursiveCharacterTextSplitter(chunk_size=50, chunk_overlap=20)
    chunks = text_split.split_documents(docs)

    embeddings = DashScopeEmbeddings(
        model="text-embedding-v3", dashscope_api_key=api_key
    )

    vector_store = InMemoryVectorStore.from_documents(embedding=embeddings, documents=chunks)
    retriever = vector_store.as_retriever()
    # Async retrieval: don't block the event loop inside an async def.
    retrieved_chunks = await retriever.ainvoke(user_input)

    # Guard: with no retrieved context there is nothing to answer from.
    if not retrieved_chunks:
        logging.info("未检索到相关片段，返回空结果")
        return ""

    # Join the retrieved chunk texts once; previously each chunk was appended
    # twice (raw text + str(AIMessage(...))) which duplicated and polluted the
    # prompt context.
    context_text = "\n\n".join(doc.page_content for doc in retrieved_chunks)

    prompt_text = """
        你是一个智能助手，根据用户的问题在提供的片段中找到相关信息并回答问题

        ###片段
        {chunks}
        ###

        """

    # Build the prompt once (the original rebuilt it, and called the model,
    # once per retrieved chunk — discarding every answer but the last).
    prompt_template = ChatPromptTemplate.from_messages(
        [
            ("system", prompt_text),
            ("human", "请回答以下问题:{questions}")
        ]
    )

    messages = prompt_template.invoke({
        "chunks": context_text,
        "questions": user_input
    })

    # Single async model call with the full retrieved context.
    response = await model.ainvoke(messages)
    # Lazy %-style args so the message is only formatted when the level is on.
    logging.info("本次思考结果为%s", response.content)

    return response.content
