from langchain.chains import RetrievalQA
from langchain.retrievers import BM25Retriever, EnsembleRetriever
from langchain_community.cross_encoders import HuggingFaceCrossEncoder
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import HuggingFaceHub, Ollama
from langchain_community.vectorstores import FAISS
from langchain_text_splitters import RecursiveCharacterTextSplitter

# ---------------------------------------------------------------------------
# Minimal local RAG pipeline:
#   load a text file -> chunk -> embed -> FAISS index -> hybrid retrieval
#   (dense + BM25) -> answer questions with a locally served LLM (Ollama).
# ---------------------------------------------------------------------------

# 1. Load the local document (example: a UTF-8 text file).
loader = TextLoader("knowledge.txt", encoding="utf-8")
documents = loader.load()

# 2. Split the document into overlapping chunks so each piece fits the
#    embedding model's context and retrieval stays fine-grained.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=500,    # 500 characters per chunk
    chunk_overlap=50,  # 50-character overlap between neighbouring chunks
)
texts = text_splitter.split_documents(documents)

# 3. Load a free embedding model (Hugging Face).
#    NOTE: use ONE embedding model for both indexing and querying. The
#    original script re-assigned `embeddings` to "BAAI/bge-small-zh-v1.5"
#    *after* the index was built, which would silently mix two incompatible
#    vector spaces if that object were ever used — the re-assignment is
#    removed here.
embeddings = HuggingFaceEmbeddings(
    model_name="sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2"
)

# 4. Build the local vector store and persist it so the embeddings are not
#    recomputed on every run (reload later with FAISS.load_local).
vector_db = FAISS.from_documents(texts, embeddings)
vector_db.save_local("faiss_index")

# 5. Generation model: run an open-source LLM locally via Ollama.
#    Requires `ollama pull llama3.2` in a terminal beforehand.
llm = Ollama(model="llama3.2")

# Hybrid retrieval: dense (FAISS, top-3) + sparse (BM25), merged with
# 0.7/0.3 weights. The original script built this retriever but never
# plugged it into the QA chain — it is now actually used below.
bm25_retriever = BM25Retriever.from_documents(texts)
ensemble_retriever = EnsembleRetriever(
    retrievers=[vector_db.as_retriever(search_kwargs={"k": 3}), bm25_retriever],
    weights=[0.7, 0.3],
)

# Optional cross-encoder re-ranker for retrieved candidates.
# NOTE(review): constructed but not wired into the chain (same as the
# original script). To use it, wrap `ensemble_retriever` in a
# ContextualCompressionRetriever with a CrossEncoderReranker — confirm the
# desired behaviour before enabling.
cross_encoder = HuggingFaceCrossEncoder(model_name="BAAI/bge-reranker-base")

# 6. Retrieval-augmented generation chain, now backed by the hybrid retriever.
qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    retriever=ensemble_retriever,
    return_source_documents=True,
)

# 7. Ask a question and print the answer plus its source documents.
question = "公司年假政策如何规定？"
result = qa_chain.invoke({"query": question})

print(f"问题：{question}")
print(f"答案：{result['result']}")
print("来源文档：")
for doc in result["source_documents"]:
    print(f"- {doc.metadata['source']} (页码：{doc.metadata.get('page', 'N/A')})")