# Document loading: read the raw text file into LangChain Document objects.
# NOTE: langchain.document_loaders is deprecated; use langchain_community
# (the file already imports langchain_community.chat_models below).
from langchain_community.document_loaders import TextLoader

loader = TextLoader("./data/read.txt", encoding='utf-8')
documents = loader.load()

# Document splitting
from langchain.text_splitter import CharacterTextSplitter

# Create the splitter: 500-character chunks with a 10-character overlap so
# neighbouring chunks share a little context at their boundaries.
text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=10)
# Split the loaded documents into chunks
documents = text_splitter.split_documents(documents)

# NOTE: langchain.embeddings / langchain.vectorstores are deprecated paths;
# use langchain_community, consistent with the ChatOllama import below.
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain_community.vectorstores import Chroma

# Embedding model: m3e-base.
# NOTE(review): HuggingFaceBgeEmbeddings is tailored to BGE models (it can
# prepend a BGE query instruction); moka-ai/m3e-base is not a BGE model —
# consider plain HuggingFaceEmbeddings. Verify retrieval quality.
model_name = "moka-ai/m3e-base"
model_kwargs = {'device': 'cpu'}
# Normalized embeddings make inner-product scores behave like cosine similarity.
encode_kwargs = {'normalize_embeddings': True}
embedding = HuggingFaceBgeEmbeddings(
    model_name=model_name,
    model_kwargs=model_kwargs,
    encode_kwargs=encode_kwargs
)


# Specifying persist_directory stores the embeddings on disk.
persist_directory = 'db'
db = Chroma.from_documents(documents, embedding, persist_directory=persist_directory)

# =============== Reload the persisted db after a restart ===============
# Deprecated langchain.* import paths replaced with langchain_community.
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain_community.vectorstores import Chroma

# Re-create the embedding object. It must be configured identically to the
# one used when the store was built, or similarity scores will be meaningless.
model_name = "moka-ai/m3e-base"
model_kwargs = {'device': 'cpu'}
encode_kwargs = {'normalize_embeddings': True}
embedding = HuggingFaceBgeEmbeddings(
    model_name=model_name,
    model_kwargs=model_kwargs,
    encode_kwargs=encode_kwargs
)

# Load the persisted vector store
persist_directory = 'db'

try:
    db = Chroma(persist_directory=persist_directory, embedding_function=embedding)
    print("Chroma vector store loaded successfully.")
except Exception as e:
    print(f"Error loading Chroma vector store: {e}")
    # Without a db the rest of the script cannot run; previously execution
    # fell through and db.as_retriever() below raised a confusing NameError.
    raise
# Retrieve: once the vector store is populated it can act as a retriever,
# fetching context by semantic similarity between the query and the chunks.
retriever = db.as_retriever()


# Augment: build the RAG prompt that stuffs the retrieved context alongside
# the user's question. (Template text is user-facing and kept as-is.)
from langchain.prompts import ChatPromptTemplate

_RAG_TEMPLATE = """您是问答任务的助理。
使用以下检索到的上下文来回答问题。
如果你不知道答案，就说你不知道。
最多使用三句话，保持答案简洁。
问题: {question} 
内容: {context} 
答案:
"""

prompt = ChatPromptTemplate.from_template(_RAG_TEMPLATE)
print(prompt)

from langchain_community.chat_models import ChatOllama
from langchain.schema.runnable import RunnablePassthrough
from langchain.schema.output_parser import StrOutputParser

# Generate: local chat model served by Ollama.
llm = ChatOllama(model='kollcn/llama3-8b-chinese-chat-f16-v2')

# The mapping step fills {context} from the retriever while the raw query
# passes through unchanged into {question}; the parser yields a plain string.
chain_inputs = {"context": retriever, "question": RunnablePassthrough()}
rag_chain = chain_inputs | prompt | llm | StrOutputParser()

query = "新款极氪解决了21款的哪些问题？"
answer = rag_chain.invoke(query)
print(answer)

# db.similarity_search("puzzled by")