from langchain_huggingface import HuggingFaceEmbeddings
from langchain_openai import  ChatOpenAI
from langchain_core.prompts import  ChatPromptTemplate
from langchain_community.document_loaders import WebBaseLoader
from langchain.chains.retrieval import create_retrieval_chain
# Web scraping: bs4 is used below to restrict parsing to the article body
import bs4
from dotenv import load_dotenv
from langchain_community.vectorstores import FAISS
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain.chains.combine_documents import create_stuff_documents_chain
import os
# Load API keys and endpoints from the project-level .env file.
load_dotenv('../.env')


# Fetch the source article. Only the element with id="UCAP-CONTENT" is parsed,
# which keeps site navigation and boilerplate out of the extracted documents.
page_loader = WebBaseLoader(
    web_path="https://www.gov.cn/xinwen/2020-06/01/content_5516649.htm",
    bs_kwargs={"parse_only": bs4.SoupStrainer(id="UCAP-CONTENT")},
)
docs = page_loader.load()

# Local embedding model, optimized for Chinese text.
embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-small-zh-v1.5")

# Split the page into overlapping chunks so each one fits comfortably
# in the LLM context window while preserving continuity at boundaries.
splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
chunks = splitter.split_documents(docs)
print("文档分割完成，片段数:", len(chunks))
print(chunks)

# Build an in-memory FAISS index over the chunk embeddings.
vector = FAISS.from_documents(chunks, embeddings)
print("向量存储创建成功!")
print(vector)

# RAG prompt: retrieved documents fill {context} in the system message,
# the user's question fills {input}.
rag_messages = [
    ("system", "你是一个助手。根据以下上下文回答问题：\n{context}"),
    ("user", "问题：{input}"),
]
prompt = ChatPromptTemplate.from_messages(rag_messages)

# DeepSeek exposes an OpenAI-compatible API, so ChatOpenAI works by pointing
# it at the DeepSeek base URL with the matching key (both read from the env).
llm = ChatOpenAI(
    model="deepseek-chat",
    temperature=0.5,  # moderate randomness
    api_key=os.getenv("DEEPSEEK_API_KEY"),
    base_url=os.getenv("DEEPSEEK_BASE_URL"),
)
# Chain that "stuffs" all retrieved documents into the prompt's {context} slot.
document_chain = create_stuff_documents_chain(llm, prompt)

# Configure the retriever at creation time via the supported `as_retriever`
# parameter instead of mutating `search_kwargs` on the (pydantic) retriever
# afterwards, which bypasses validation. k=5: return at most the 5 chunks
# most relevant to the question. (The original comment claimed k=10 while
# the code set k=5 — the code's value is kept.)
retriever = vector.as_retriever(search_kwargs={"k": 5})

# Full RAG pipeline: retrieve -> stuff into prompt -> LLM answer.
retrieval_chain = create_retrieval_chain(retriever, document_chain)
response = retrieval_chain.invoke({"input": "建设用地是什么"})
print(response["answer"])



