# Vector store ingestion: crawl a web page, then (below) embed and index it.
import bs4
from dotenv import load_dotenv  # was `load_dotenvn` — typo, ImportError
from langchain_community.document_loaders import WebBaseLoader  # dropped nonexistent `weBaseLoader`
from openai import embeddings  # NOTE(review): unused — rebound to OpenAIEmbeddings() later in this file; consider removing

# Load environment variables (e.g. OPENAI_API_KEY) from a .env file.
load_dotenv()

# Web crawler: fetch the page and parse only the element with id="UCAP_CONTENT"
# (the article body), skipping the rest of the page's markup.
# NOTE: keyword is `web_path`, not `wed_path` — the original typo meant no URL was passed.
loader = WebBaseLoader(
    web_path="https://www.gov.cn/xinwen/2020-06/01/content_5516649.htm",
    bs_kwargs=dict(parse_only=bs4.SoupStrainer(id="UCAP_CONTENT")),
)
docs = loader.load()
# print(docs)

from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter

# OpenAI embedding model (reads OPENAI_API_KEY from the environment).
embeddings = OpenAIEmbeddings()

# Chunk the crawled documents: up to 500 characters per chunk with a
# 50-character overlap so sentences spanning a boundary are not lost.
splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
document = splitter.split_documents(docs)
print(len(document))

# Embed every chunk and build an in-memory FAISS vector index from them.
vector = FAISS.from_documents(document, embeddings)



