# 示例：milvus_zilliz.py
# pip install --upgrade --quiet  pymilvus
import os

from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Zilliz
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter

# Load the source document (GBK-encoded text).
# NOTE(review): the original path was r"/Milvus_Zilliz\terms.txt" — in a raw
# string the "\t" is a literal backslash-t, so the separators were mixed and
# the path malformed on POSIX. Normalized to forward slashes, which also work
# on Windows — confirm the actual file location.
loader = TextLoader("/Milvus_Zilliz/terms.txt", encoding="GBK")
documents = loader.load()

# Split the documents into fixed-size chunks.
# Length-based chunking on "\n" separators; 20-char overlap preserves context
# across chunk boundaries.
text_splitter = CharacterTextSplitter(
    chunk_size=100,
    chunk_overlap=20,
    length_function=len,
    is_separator_regex=False,
    separator='\n',
)
docs = text_splitter.split_documents(documents)

# Create the embedding model (reads OPENAI_API_KEY from the environment).
embeddings = OpenAIEmbeddings()

# Fail fast with a clear message if the Milvus credentials are missing;
# otherwise os.getenv() returns None and the connection fails obscurely.
milvus_uri = os.getenv("MILVUS_API_URL")
milvus_token = os.getenv("MILVUS_API_KEY")
if not milvus_uri or not milvus_token:
    raise RuntimeError(
        "MILVUS_API_URL and MILVUS_API_KEY environment variables must be set"
    )

# Connect to Zilliz and store the chunks in collection "collection_5".
vector_db = Zilliz.from_documents(  # or Milvus.from_documents
    docs,
    embeddings,
    collection_name="collection_5",
    connection_args={"uri": milvus_uri, "token": milvus_token},
    drop_old=True,  # Drop the old Milvus collection if it exists
    auto_id=True,   # Auto-generate primary-key IDs
)

# Query: "What is the English word for 轿车 (car)?"
query = "轿车的英文是?"
results = vector_db.similarity_search(query, k=1)  # return the single most similar chunk
if results:
    print(results[0].page_content)
else:
    print("No matching documents found.")