import os
from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Milvus
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA

# Configuration.
# NOTE(review): never hard-code a real API key in source — export it in the
# environment (or use a secrets manager). setdefault() keeps an already
# exported OPENAI_API_KEY instead of clobbering it with this placeholder.
os.environ.setdefault("OPENAI_API_KEY", "your_openai_api_key")
# Milvus connection endpoint (defaults for a local standalone deployment).
MILVUS_HOST = "localhost"
MILVUS_PORT = "19530"

# 1. Load the raw document(s) from disk.
raw_docs = TextLoader('example.txt').load()

# 2. Split into non-overlapping chunks of at most 1000 characters each.
splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = splitter.split_documents(raw_docs)

# 3. Embed the chunks with an open-source sentence-transformers model
#    (all-MiniLM-L6-v2, downloaded from the HuggingFace hub).
embedder = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")

# 4. Index the embedded chunks in a Milvus vector store.
vector_db = Milvus.from_documents(
    docs,
    embedder,
    connection_args={"host": MILVUS_HOST, "port": MILVUS_PORT},
)

# 5. Retrieval: expose the vector store through the retriever interface.
retriever = vector_db.as_retriever()

# 6. Generation: answer with an OpenAI chat model over the retrieved chunks.
#    "stuff" chain type: all retrieved documents are placed into one prompt.
qa_chain = RetrievalQA.from_chain_type(
    llm=ChatOpenAI(),
    chain_type="stuff",
    retriever=retriever,
)

# Smoke-test query against the indexed document.
question = "What is the main idea of the document?"
print(qa_chain.run(question))
