# pdfchat/query_data.py
from langchain.llms import LlamaCpp
from langchain.chains import ConversationalRetrievalChain
from huggingface_hub import hf_hub_download
import psutil
import os


def get_chain(vectorstore):
    # Fetch the quantized Vicuna 7B weights on first use; later runs reuse the local copy.
    if not os.path.exists("ggml-vic7b-q5_1.bin"):
        hf_hub_download(repo_id="eachadea/ggml-vicuna-7b-1.1", filename="ggml-vic7b-q5_1.bin", local_dir=".")
    # One llama.cpp thread per physical core; logical (SMT) cores rarely speed up token generation.
    llm = LlamaCpp(model_path="ggml-vic7b-q5_1.bin", n_ctx=2048, n_threads=psutil.cpu_count(logical=False))
    qa_chain = ConversationalRetrievalChain.from_llm(
        llm,
        vectorstore.as_retriever(),
        # condense_question_prompt=CONDENSE_QUESTION_PROMPT,
    )
    return qa_chain
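

if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original file: builds a FAISS
    # vectorstore from a placeholder PDF ("example.pdf") with the default
    # HuggingFaceEmbeddings model, then runs one turn of the conversational
    # chain. Imports follow the same pre-0.1 LangChain layout used above.
    from langchain.document_loaders import PyPDFLoader
    from langchain.embeddings import HuggingFaceEmbeddings
    from langchain.vectorstores import FAISS

    docs = PyPDFLoader("example.pdf").load_and_split()
    vectorstore = FAISS.from_documents(docs, HuggingFaceEmbeddings())

    chain = get_chain(vectorstore)
    # ConversationalRetrievalChain expects the new question plus prior chat history.
    result = chain({"question": "What is this document about?", "chat_history": []})
    print(result["answer"])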