### 5.6向量数据库

#### 5.6.1 Chroma
# 引入向量化的类
from langchain_community.vectorstores import Chroma
from langchain.embeddings.dashscope import DashScopeEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader

# Load the source document.
doc = TextLoader("../home/doc/NBA新闻.txt", encoding='utf-8').load()

# Initialize the text splitter: split on newlines, ~100 chars per chunk, no overlap.
spliter = CharacterTextSplitter("\n", chunk_size=100, chunk_overlap=0)

# Split the document into chunks.
chunks = spliter.split_documents(doc)

# Instantiate the embedding model
# (presumably reads DASHSCOPE_API_KEY from the environment — confirm).
embeddings = DashScopeEmbeddings()

# Create the vector store, embedding every chunk, persisted under ./chroma.
db = Chroma.from_documents(chunks, embeddings, persist_directory="./chroma")
db.persist()  # flush the collection to disk

# Re-split the SAME document with larger chunks and add those as well.
# NOTE(review): this indexes the same file twice with different chunk sizes,
# so similarity search may return near-duplicate passages — confirm intended.
doc = TextLoader("../home/doc/NBA新闻.txt", encoding='utf-8').load()
spliter = CharacterTextSplitter("\n", chunk_size=200, chunk_overlap=0)
chunks = spliter.split_documents(doc)
# Append the new chunks to the existing collection.
db.add_documents(chunks)
# len() is the idiomatic spelling of __len__(); print so the count is visible
# instead of being silently discarded.
print(len(db))
# Reload a persisted collection from disk.
# NOTE(review): this path ("./chroma/zhisk1") does not match the directory the
# data above was persisted to ("./chroma") — confirm which location is intended.
db1 = Chroma(persist_directory="./chroma/zhisk1", embedding_function=embeddings)
# len() is the idiomatic spelling of __len__(); print instead of discarding.
print(len(db1))

# Recall the k most similar chunks for the question.
rets = db.similarity_search("2024冠军球队是谁", k=2)

# Assemble the prompt: retrieved context first, then the instruction.
# str.join replaces the quadratic += concatenation loop.
prompt = "".join(ret.page_content + "\n" for ret in rets)
prompt += "请根据上面内容回答：" + "2024冠军球队是谁"
print('--------------------')
print(prompt)

# Bring in the Tongyi (Qwen) LLM wrapper.
from langchain_community.llms import Tongyi

# Instantiate the model
# (presumably reads DASHSCOPE_API_KEY from the environment — confirm).
llm = Tongyi()

# Call the model with the retrieval-augmented prompt.
# The original discarded the return value; print it so the answer is visible
# when this runs as a script.
print(llm.invoke(prompt))

# Retrieval-augmented question answering.
from langchain.chains import RetrievalQA

# Build a "stuff" chain: all retrieved documents are stuffed into one prompt
# that is sent to the LLM, with the Chroma store acting as the retriever.
qa = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=db.as_retriever(),
)
answer = qa.invoke("2024年NBA冠军是谁")
print(answer)
