# Build a vector database from different kinds of source data (web HTML, PDF, Word, etc.)
# pip install html2text unstructured onnxruntime
# pip install chroma-hnswlib==0.7.5 chromadb==0.5.4
# pip install onnxruntime
import os

from langchain_community.document_loaders import WebBaseLoader, AsyncHtmlLoader
from langchain_community.document_transformers import Html2TextTransformer
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import SentenceTransformerEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
import onnxruntime
# Print the onnxruntime version to confirm the native runtime imported cleanly
# (chromadb's default embedding path depends on it).
print('版本:', onnxruntime.__version__)

# Route HTTP(S) traffic through a local proxy (e.g. Clash/V2Ray on port 7890).
# Proxy URLs must carry an explicit scheme: a bare "host:port" value is
# rejected or misparsed by urllib/requests, which the loaders below use.
os.environ['http_proxy'] = 'http://127.0.0.1:7890'
os.environ['https_proxy'] = 'http://127.0.0.1:7890'

# SECURITY: never commit API keys to source control. The key must be supplied
# via the OPENAI_API_KEY environment variable; warn early if it is missing so
# the failure is obvious before any network work is done.
if not os.environ.get("OPENAI_API_KEY"):
    print("WARNING: OPENAI_API_KEY is not set; OpenAI embedding calls will fail.")

# Fetch the Spring Cloud Gateway reference page and strip its HTML down to
# plain text. Each fetched page becomes one Document (text + metadata).
url = "https://docs.spring.io/spring-cloud-gateway/reference/"
docs = WebBaseLoader(url).load()

# Alternative: AsyncHtmlLoader accepts a list of URLs and fetches concurrently.
# docs = AsyncHtmlLoader([url]).load()

docs = Html2TextTransformer().transform_documents(docs)
print(docs[0])


# Split the documents into overlapping chunks small enough to embed.
# Separators are tried in order: paragraph, line, CJK full stop, space.
splitter = RecursiveCharacterTextSplitter(
    chunk_size=500,       # max characters per chunk
    chunk_overlap=50,     # characters shared between adjacent chunks
    separators=["\n\n", "\n", "。", " "],
)
docs = splitter.split_documents(docs)

# Embed every chunk and build the vector store, persisting it to disk.
# Local-model alternatives (kept for reference):
# embeddings = SentenceTransformerEmbeddings(model_name="chinese-alpaca-2-7b-16k")
# embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
embeddings = OpenAIEmbeddings()

# Chroma writes its index under persist_directory so later runs can reload it.
vector_db = Chroma.from_documents(docs, embeddings, persist_directory="./chroma_db")

# Sanity-check the store with a similarity search and print the hits.
query = "Spring Cloud Gateway configuration"
# similarity_search_with_score(query) would also return distance scores.
results = vector_db.similarity_search(query)
print("*" * 20)
print(results)
# NOTE: vector_db.persist() is unnecessary on recent chromadb versions —
# from_documents with persist_directory already writes to disk.