import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)

from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import OllamaEmbeddings
from langchain_community.llms import Ollama
from langchain.chains import RetrievalQA
from langchain.text_splitter import RecursiveCharacterTextSplitter
try:
    # Load the source document. TextLoader reads a single plain-text file;
    # LangChain provides many other loaders (PDF, HTML, CSV, ...) with the
    # same interface.
    # NOTE(review): the path spells "docoment_loaders" (sic) — confirm the
    # directory on disk really uses this spelling before "fixing" it.
    loader = TextLoader(
        file_path="../docoment_loaders/test.txt", encoding="utf-8")
    documents = loader.load()

    # Split the document into overlapping chunks so each piece fits within
    # the embedding model's context and retrieval stays fine-grained.
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200,
    )
    splits = text_splitter.split_documents(documents)

    # Embed the chunks with an Ollama-hosted embedding model.
    embeddings = OllamaEmbeddings(
        base_url='http://192.168.2.208:11434',
        model="llama2",
    )
    # Build a Chroma vector store from the split documents, persisted on disk.
    vectorstore = Chroma.from_documents(
        documents=splits,
        embedding=embeddings,
        persist_directory="../chroma/chroma_db",
    )

    # Chat LLM used to answer natural-language questions over the
    # chunks returned by the retriever.
    llm = Ollama(
        base_url='http://192.168.2.208:11434',
        model="deepseek-r1:1.5b",
    )
    qa_chain = RetrievalQA.from_chain_type(
        llm,
        retriever=vectorstore.as_retriever(),
        chain_type="stuff",  # "stuff": concatenate retrieved docs directly into the prompt
    )

    query = "埃及金字塔最早哪年开始建造？"
    # Use .invoke() — calling the chain directly, qa_chain({"query": ...}),
    # relies on the deprecated Chain.__call__ API (removed in LangChain 0.3).
    result = qa_chain.invoke({"query": query})
    print(result["result"])

except Exception as e:
    # Top-level script boundary: report the failure instead of crashing so
    # the console capture still sees the message.
    error_msg = f"执行错误: {e}"
    print(error_msg)  # Continue captures console output