"""Minimal RAG demo: index a sample string with Chroma + Ollama embeddings,
then answer one question about it via a RetrievalQA chain.

Requires a local Ollama server at ``OLLAMA_BASE_URL`` with the
``llama3.1:latest`` model pulled.
"""

from langchain.chains import RetrievalQA
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain_ollama import OllamaLLM
from langchain_ollama.embeddings import OllamaEmbeddings

OLLAMA_BASE_URL = "http://localhost:11434"
MODEL_NAME = "llama3.1:latest"

# Sample data string -- replace with your actual content.
YOUR_DATA = "In Chongqing, Alan is Cao-zongying"


def main() -> None:
    """Build the vector store from the sample text and run one QA query."""
    # Wrap the raw string in a Document (simplified: a single document).
    data = [Document(page_content=YOUR_DATA)]

    # Split the document into retrieval-sized chunks.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
    all_splits = text_splitter.split_documents(data)

    # Create embeddings and the vector database.
    # NOTE(review): llama3.1 is a chat model; a dedicated embedding model
    # (e.g. nomic-embed-text) usually retrieves better -- confirm intent.
    oembed = OllamaEmbeddings(base_url=OLLAMA_BASE_URL, model=MODEL_NAME)
    vectorstore = Chroma.from_documents(documents=all_splits, embedding=oembed)

    # Create the LLM and ask the question through a RetrievalQA chain.
    # (The chain performs its own retrieval via the retriever, so no
    # separate similarity_search call is needed.)
    ollama = OllamaLLM(
        base_url=OLLAMA_BASE_URL,
        model=MODEL_NAME,
    )
    question = "Who is Alan in Chongqing."
    qachain = RetrievalQA.from_chain_type(ollama, retriever=vectorstore.as_retriever())
    res = qachain.invoke({"query": question})
    print(res["result"])


if __name__ == "__main__":
    main()