from langchain_community.document_loaders import PyPDFLoader
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.llms import Tongyi
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_chroma import Chroma

# Post-processing
def combine_page_content(docs):
    """Concatenate the text of retrieved documents, separated by blank lines."""
    texts = [doc.page_content for doc in docs]
    return "\n\n".join(texts)

# Load documents
# NOTE: use load() rather than load_and_split() here — load_and_split()
# already applies a default text splitter, so the original code split the
# pages twice (once with defaults, once with r_splitter below).
loader = PyPDFLoader("data/中小企业数字化转型指南.pdf")
pages = loader.load()

# Split pages into retrieval-sized chunks.
r_splitter = RecursiveCharacterTextSplitter(
    chunk_size=250,
    chunk_overlap=20,
    # separators: no need to set — defaults to ["\n\n", "\n", " ", ""]
    # is_separator_regex=True
)
docs = r_splitter.split_documents(pages)
# Embed the chunks with a Chinese sentence-embedding model and index them
# in an in-memory Chroma vector store.
embed_model = HuggingFaceEmbeddings(model_name='infgrad/stella-base-zh-v3-1792d')
vdb = Chroma.from_documents(docs, embed_model)

# Direct similarity query against the vector store (example):
# query = "实施原则有哪些？"
# result = vdb.similarity_search(query,5)
# print(result)

# Use the vector store as a retriever.
retriever = vdb.as_retriever()
# invoke() is the current Runnable API; get_relevant_documents() is
# deprecated in langchain-core.
result = retriever.invoke("请解释计算机辅助设计。")
print(result)
context = combine_page_content(result)

# qwen-plus can be used here as well
llm = Tongyi(model_name="qwen-turbo")

# Prompt: answer strictly from the supplied context.
template_text = """
    请根据给定的上下文回答下面的问题，如果上下文中没有给出相关信息的话，请回答“没有相关信息！”
    上下文：{context}
    问题：{question}
    """
# from_template infers the input variables ({context}, {question}) itself.
prompt = PromptTemplate.from_template(template_text)

rag_chain = prompt | llm | StrOutputParser()
generation = rag_chain.invoke({'context': context, 'question': "请解释计算机辅助设计。"})
print(generation)
