from langchain.document_loaders import OnlinePDFLoader
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.vectorstores import Chroma
from langchain import PromptTemplate
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chains import RetrievalQA
from ChatGLM_new import ollama_llm,tongyi_embeddings,xinhuo_embeddings,ollama_emb,kimi_llm
from langchain_community.embeddings import ModelScopeEmbeddings
import sys
import os
from text2vec import SentenceModel
class SuppressStdout:
    """Context manager that temporarily redirects stdout and stderr to os.devnull.

    Used to silence noisy third-party console output (e.g. during vector-store
    indexing). On exit both devnull handles are closed and the original
    streams are restored.
    """

    def __enter__(self):
        # Remember the real streams so they can be restored on exit.
        self._original_stdout = sys.stdout
        self._original_stderr = sys.stderr
        sys.stdout = open(os.devnull, 'w')
        sys.stderr = open(os.devnull, 'w')
        return self  # allow `with SuppressStdout() as s:` usage

    def __exit__(self, exc_type, exc_val, exc_tb):
        sys.stdout.close()
        # Bug fix: the devnull handle assigned to stderr was previously
        # never closed, leaking a file object on every use.
        sys.stderr.close()
        sys.stdout = self._original_stdout
        sys.stderr = self._original_stderr






model_id = "D:/06git/01python/02vanna/03Chroma/embedding/damo/nlp_corom_sentence-embedding_chinese-base"

embeddings = ModelScopeEmbeddings(model_id=model_id)

# load the pdf and split it into chunks
#loader = OnlinePDFLoader("https://d18rn0p25nwr6d.cloudfront.net/CIK-0001813756/975b3e9b-268e-4798-a9e4-2a9a7c92dc10.pdf")
file_path = "smartpole.pdf"
loader = PyPDFLoader(file_path)

data = loader.load()

from langchain.text_splitter import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)

all_splits = text_splitter.split_documents(data)

ids = [str(i) for i in range(len(all_splits))]

with SuppressStdout():
    vectorstore = Chroma.from_documents(documents=all_splits, embedding=embeddings, ids=ids)

# Prompt: answer strictly from the retrieved context, admit ignorance rather
# than invent, keep the answer to at most three sentences.
# NOTE: the template literal (including the 4-space continuation indents) is
# unchanged from the original.
template = """使用以下上下文来回答最后的问题.
    如果你不知道答案，就说你不知道，不要试图编造一个答案.
    最多使用三句话，并尽可能保持答案简洁明了.
    {context}
    Question: {question}
    Helpful Answer:"""

# Fix: the prompt, LLM binding and RetrievalQA chain are loop-invariant —
# previously they were rebuilt on every user query.
QA_CHAIN_PROMPT = PromptTemplate(
    input_variables=["context", "question"],
    template=template,
)

llm = kimi_llm
qa_chain = RetrievalQA.from_chain_type(
    llm,
    retriever=vectorstore.as_retriever(),
    chain_type_kwargs={"prompt": QA_CHAIN_PROMPT},
)

# Interactive REPL: type a question; "exit" quits, blank input is ignored.
while True:
    query = input("\nQuery: ")
    if query == "exit":
        break
    if query.strip() == "":
        continue

    result = qa_chain.invoke({"query": query})
    print(result)