"""Ingest a web page into a Chroma vector store.

Pipeline: WebBaseLoader (fetch HTML) -> RecursiveCharacterTextSplitter
(chunking) -> OpenAIEmbeddings pointed at the Volcengine Ark
OpenAI-compatible endpoint -> Chroma.from_documents.
"""
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter

# SECURITY: hardcoded API credential checked into source — rotate this key
# and load it from an environment variable / secrets manager instead.
api_key = "34a3cc1f-3608-4c2e-8cbe-6f2076c30126"
# NOTE(review): `model` is not referenced anywhere in this chunk — confirm it
# is used further down the file before removing.
model = "ep-20240606130022-nrz5h"

# Fetch and parse the HTML page into LangChain Document objects.
WEB_URL = "https://zhuanlan.zhihu.com/p/85289282"
loader = WebBaseLoader(WEB_URL)
docs = loader.load()

# Embedding model served by the Volcengine Ark OpenAI-compatible API.
# check_embedding_ctx_length=False: skip the OpenAI-specific token-count
# pre-check, which does not apply to this non-OpenAI endpoint.
embeddings = OpenAIEmbeddings(
    openai_api_key=api_key,
    model="ep-20241106171640-d9rjj",
    openai_api_base="https://ark.cn-beijing.volces.com/api/v3",
    check_embedding_ctx_length=False,
)

# Recursive character splitter. Separators are tried in order; "" must be
# LAST — it matches between every character, so any separator listed after
# it (the original had "。" and "，" after "") would never be reached.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=384,
    chunk_overlap=0,
    separators=["\n\n", "\n", " ", "。", "，", ""],
)
documents = text_splitter.split_documents(docs)

# Extract the raw chunk texts for the embedding call below.
texts = [doc.page_content for doc in documents]
print(texts)

# Embed the chunks (second positional arg is the embedding batch chunk_size).
embed_documents = embeddings.embed_documents(texts, 1024)
# Report the vector dimension from the result we already have, instead of
# issuing a second, redundant embedding API call.
print("向量维度：", len(embed_documents[0]))

# Persist the chunks into an in-memory Chroma vector store (Chroma embeds
# the documents itself via the `embeddings` object).
vector = Chroma.from_documents(documents, embeddings)