from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_ollama import OllamaEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.vectorstores import Chroma
from langchain.prompts import ChatPromptTemplate
from langchain_ollama.chat_models import ChatOllama
from langchain.schema.runnable import RunnablePassthrough
from langchain.schema.output_parser import StrOutputParser
from langchain_core.messages import HumanMessage, SystemMessage
import os

# Ollama server endpoint and model name, used for both embeddings and chat.
base_url = "http://10.12.8.21:11434"
model = "llama3.1"

# Prompt sent to the chat model: {context} receives the retrieved passages,
# {question} the user's question. The text is user-facing and kept verbatim.
template = """你是一位问答助手，请你根据以下内容：\n{context}\n，回答下面的问题，如果提供的文本信息无法回答问题，请直接回复"提供的文本无法回答问题"。
问题：{question}
"""
question = "战士金是谁，他喜欢哪些乐队？"


def _load_chunks(path="resources/rag_demo.txt"):
    """Load the demo document and split it into small overlapping chunks.

    chunk_size/chunk_overlap keep the original demo values (20/10) —
    presumably tuned for the short newline-separated Chinese sentences in
    the demo corpus; confirm before reusing on other documents.
    """
    documents = TextLoader(path, encoding="utf-8").load()
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=20, chunk_overlap=10, separators=['\n']
    )
    return splitter.split_documents(documents)


def _build_retriever(chunks, k=5):
    """Embed chunks into an in-memory FAISS index; return a top-k retriever."""
    db = FAISS.from_documents(chunks, OllamaEmbeddings(base_url=base_url, model=model))
    return db.as_retriever(search_kwargs={"k": k})


def main():
    """Run the end-to-end RAG demo: load -> index -> retrieve -> answer."""
    # Load and split the source text.
    chunks = _load_chunks()
    print(chunks)

    # Build the vector store and recall the passages relevant to the question.
    retriever = _build_retriever(chunks)
    context = retriever.invoke(question)
    print(context)

    # Stitch the retrieved passages into one context string for the prompt.
    context_str = "；".join([d.page_content for d in context])
    input_str = template.format_map({"context": context_str, "question": question})

    # Ask the chat model, grounding it with the retrieved context.
    chat = ChatOllama(base_url=base_url, model=model)
    messages = [SystemMessage(content="你是一位问答助手"), HumanMessage(content=input_str)]
    response = chat.invoke(messages)
    print(response.content)


# Guard the entry point so importing this module no longer triggers
# network and file I/O as a side effect.
if __name__ == "__main__":
    main()