import configparser
import io
import sys

# Force UTF-8 on stdout so the Chinese query/results print correctly on
# consoles whose default encoding is not UTF-8 (e.g. Windows cp936/GBK).
# reconfigure() (Python 3.7+) changes the encoding of the EXISTING stream
# in place, preserving its buffering mode — unlike wrapping
# sys.stdout.buffer in a fresh TextIOWrapper, which drops line buffering
# and leaves the old wrapper unreferenced.
sys.stdout.reconfigure(encoding='utf-8')

from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Zilliz
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter

# Read the Milvus/Zilliz connection settings from config.ini.
# RawConfigParser is used so values are taken verbatim (no '%' interpolation).
config = configparser.RawConfigParser()
config.read('config.ini')

# Credentials for the vector store, consumed further below.
milvus_uri = config.get('example', 'uri')
token = config.get('example', 'token')

# Load the raw source document.
loader = TextLoader("../../data/txt/terms.txt", encoding="utf-8")
documents = loader.load()

# Length-based chunking (a common default): split on newlines into
# ~50-character chunks with a 10-character overlap between neighbours.
splitter = CharacterTextSplitter(
    separator='\n',
    chunk_size=50,
    chunk_overlap=10,
    length_function=len,
    is_separator_regex=False,
)
docs = splitter.split_documents(documents)

# Embedding model used to vectorize each chunk.
embeddings = OpenAIEmbeddings()

# Connect to Milvus/Zilliz and ingest the chunks into the "terms" collection.
connection = {"uri": milvus_uri, "token": token}
vector_db = Zilliz.from_documents(  # Milvus.from_documents works the same way
    docs,
    embeddings,
    collection_name="terms",
    connection_args=connection,
    drop_old=True,  # drop the old collection first if it already exists
    auto_id=True,   # let the store generate primary-key IDs automatically
)

# Run a similarity search against the freshly built collection and print
# the best match (or a fallback message when nothing is found).
query = "NVH是什么? 注意，你只要输出中文的部分，输出格式：中文：英文"
results = vector_db.similarity_search(query, k=1)  # top-1 most similar chunk
print(results[0].page_content if results else "No matching documents found.")
