from langchain_huggingface import HuggingFaceEmbeddings
from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
from langchain_openai import  ChatOpenAI
from langchain_core.prompts import  ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser,JsonOutputParser
from langchain_community.document_loaders import WebBaseLoader
import bs4
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_text_splitters import RecursiveCharacterTextSplitter


# Fetch the target page, keeping only the element with id="UCAP-CONTENT"
# (the article body) so navigation/boilerplate never enters the index.
loader = WebBaseLoader(
    web_path="https://www.gov.cn/xinwen/2020-06/01/content_5516649.htm",
    bs_kwargs=dict(parse_only=bs4.SoupStrainer(id="UCAP-CONTENT")),
)
docs = loader.load()

# Guard: if the request failed or the page structure changed, `docs` is empty
# (or has blank page_content) and FAISS.from_documents would later fail with
# an opaque error far from the real cause. Fail fast with a clear message.
if not docs or not any(d.page_content.strip() for d in docs):
    raise RuntimeError(
        "No content extracted from the page; check the URL and the "
        "SoupStrainer id ('UCAP-CONTENT')."
    )

# Local embedding model, optimized for Chinese text.
embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-small-zh-v1.5")

# Split into overlapping chunks so each embedding covers a coherent span
# and neighboring chunks share context across the boundary.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
documents = text_splitter.split_documents(docs)
print("文档分割完成，片段数:", len(documents))
print(documents)

# Embed every chunk and build the FAISS vector store.
vector = FAISS.from_documents(documents, embeddings)
print("向量存储创建成功!")
print(vector)

print("向量个数:", vector.index.ntotal)

# Sanity-check retrieval: the 3 chunks most similar to the query.
results = vector.similarity_search("中华人民", k=3)
for i, doc in enumerate(results):
    print(f"第 {i+1} 个相似文本：")
    print(doc.page_content)
    print("________________________________________")