from langchain_chroma import Chroma
from langchain_community.document_loaders import UnstructuredEPubLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import Tongyi
from langchain_community.vectorstores.utils import filter_complex_metadata
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_text_splitters import RecursiveCharacterTextSplitter

# Post-processing
def combine_page_content(docs):
    """Join the text of retrieved documents into one context string.

    Each document's ``page_content`` is concatenated, separated by a
    blank line, ready to be injected into the prompt template.
    """
    parts = [doc.page_content for doc in docs]
    return "\n\n".join(parts)

# Load the EPUB and split it into element-level documents.
loader = UnstructuredEPubLoader("data/格林童话.epub", mode="elements")
pages = loader.load_and_split()

r_splitter = RecursiveCharacterTextSplitter(
    chunk_size=300,
    chunk_overlap=20,
    # separators defaults to ["\n\n", "\n", " ", ""], which is sufficient here
)
docs = r_splitter.split_documents(pages)

# BUG FIX: UnstructuredEPubLoader(mode="elements") attaches metadata values
# that are lists (e.g. languages=['zho']), but Chroma only accepts
# str/int/float/bool metadata values and raises
# "ValueError: Expected metadata value to be a str, int, float or bool"
# (see the traceback recorded at the bottom of this file). Strip the
# complex metadata before indexing, as the error message itself suggests.
docs = filter_complex_metadata(docs)

embed_model = HuggingFaceEmbeddings(model_name='infgrad/stella-base-zh-v3-1792d')
vdb = Chroma.from_documents(docs, embed_model, persist_directory="green-fairy-tales")

# Expose the vector store as a retriever.
retriever = vdb.as_retriever()
question = "请介绍这本书的主要内容。"
# .invoke() replaces the deprecated Retriever.get_relevant_documents().
result = retriever.invoke(question)
print(result)
context = combine_page_content(result)

llm = Tongyi(model_name="qwen-turbo")  # qwen-plus can also be used
prompt = PromptTemplate(
    template="""
    请根据给定的上下文信息回答下面的问题。如果上下文中没有给出相关信息的话，请回答“没有相关信息！”
    上下文：{context}
    问题：{question}
    """,
    input_variables=['context', 'question'],
)
rag_chain = prompt | llm | StrOutputParser()
generation = rag_chain.invoke({'context': context, 'question': question})
print(generation)

# 处理epub文件的时候出现了错误：
# Traceback (most recent call last):
#   File "D:\CondaData\envs\unstructured\lib\site-packages\langchain_chroma\vectorstores.py", line 328, in add_texts
#     self._collection.upsert(
#   File "D:\CondaData\envs\unstructured\lib\site-packages\chromadb\api\models\Collection.py", line 477, in upsert
#     ) = self._validate_embedding_set(
#   File "D:\CondaData\envs\unstructured\lib\site-packages\chromadb\api\models\Collection.py", line 554, in _validate_embedding_set
#     validate_metadatas(maybe_cast_one_to_many_metadata(metadatas))
#   File "D:\CondaData\envs\unstructured\lib\site-packages\chromadb\api\types.py", line 310, in validate_metadatas
#     validate_metadata(metadata)
#   File "D:\CondaData\envs\unstructured\lib\site-packages\chromadb\api\types.py", line 278, in validate_metadata
#     raise ValueError(
# ValueError: Expected metadata value to be a str, int, float or bool, got ['zho'] which is a <class 'list'>
#
# During handling of the above exception, another exception occurred:
#
# Traceback (most recent call last):
#   File "D:\Programs\rag-langchain\qa-green.py", line 25, in <module>
#     vdb = Chroma.from_documents(docs, embed_model, persist_directory="green-fairy-tales")
#   File "D:\CondaData\envs\unstructured\lib\site-packages\langchain_chroma\vectorstores.py", line 791, in from_documents
#     return cls.from_texts(
#   File "D:\CondaData\envs\unstructured\lib\site-packages\langchain_chroma\vectorstores.py", line 749, in from_texts
#     chroma_collection.add_texts(
#   File "D:\CondaData\envs\unstructured\lib\site-packages\langchain_chroma\vectorstores.py", line 340, in add_texts
#     raise ValueError(e.args[0] + "\n\n" + msg)
# ValueError: Expected metadata value to be a str, int, float or bool, got ['zho'] which is a <class 'list'>
#
# Try filtering complex metadata from the document using langchain_community.vectorstores.utils.filter_complex_metadata.

