import chromadb
from langchain_community.llms import Ollama
from langchain_community.embeddings import OllamaEmbeddings
from langchain import hub
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_text_splitters import RecursiveCharacterTextSplitter,CharacterTextSplitter
from langchain_community.document_loaders import DirectoryLoader
from langchain_community.document_loaders import TextLoader

# Models: a local Ollama LLM for generation and a Chinese-text embedding
# model for vectorizing document chunks (both served by a local Ollama daemon).
llm = Ollama(model="qwen:14b")
embed_model = OllamaEmbeddings(model="milkey/dmeta-embedding-zh:f16")

# Load and read documents from the local folder.
# NOTE(review): hard-coded Windows path — confirm it exists and is readable
# on the machine this runs on.
loader = DirectoryLoader('F:\\datas\\nlp\\jianli\\')
documents = loader.load()

# Split documents into chunks of at most 100 characters with no overlap.
text_splitter = CharacterTextSplitter(chunk_size=100, chunk_overlap=0)
splits = text_splitter.split_documents(documents)

# Prepare the vector store: connect to a Chroma server on localhost:8000,
# ensure the target collection exists (cosine distance), and wrap it in a
# LangChain Chroma store that embeds via embed_model.
chroma = chromadb.HttpClient(host="localhost", port=8000)
# chroma.delete_collection(name="ragdb")
collection = chroma.get_or_create_collection(name="rb1", metadata={"hnsw:space": "cosine"})
db = Chroma(client=chroma,collection_name="rb1",embedding_function=embed_model)

# Store the chunks in the vector store, building the index.
# NOTE(review): this runs unconditionally on every execution, so repeated
# runs will insert duplicate chunks into "rb1" — consider passing stable ids
# or guarding with an existence check.
db.add_documents(splits)

# Expose the vector store as a retriever.
retriever = db.as_retriever()

# Build a RAG chain with the LangChain Expression Language (LCEL):
# retrieved documents are flattened into one context string, the user's
# question passes through unchanged, then prompt -> LLM -> plain string.
prompt = hub.pull("rlm/rag-prompt")


def _format_docs(docs):
    """Join the page contents of retrieved documents into one prompt-ready string."""
    return "\n\n".join(doc.page_content for doc in docs)


rag_chain = (
    {"context": retriever | _format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)

# Interactive Q&A loop: read a question, run it through the RAG chain,
# and print the answer. Type "exit" (any case) to quit.
while True:
    try:
        user_input = input("问题：")
    except (EOFError, KeyboardInterrupt):
        # Exit cleanly on Ctrl-D / Ctrl-C instead of crashing with a traceback.
        break

    # Strip surrounding whitespace so inputs like " exit " still quit.
    question = user_input.strip()
    if question.lower() == "exit":
        break
    if not question:
        # Don't send blank questions through the chain.
        continue

    response = rag_chain.invoke(question)
    print("AI助手：", response)