File size: 651 Bytes
6feb027
 
 
 
 
 
 
 
 
 
 
 
 
88278c4
6feb027
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
from langchain.llms import LlamaCpp
from langchain.chains import ConversationalRetrievalChain
from huggingface_hub import hf_hub_download

import psutil
import os




def get_chain(vectorstore, model_path="ggml-vic7b-q5_1.bin"):
    """Build a ConversationalRetrievalChain backed by a local llama.cpp model.

    On first use, downloads the quantized Vicuna-7B GGML weights from the
    Hugging Face Hub into the current directory, then wires a LlamaCpp LLM
    to the given vector store's retriever.

    Args:
        vectorstore: A vector store exposing ``as_retriever()``.
        model_path: Path to the GGML model file. Downloaded from the Hub
            if it does not exist locally. Defaults to the Vicuna-7B q5_1
            quantization.

    Returns:
        A ConversationalRetrievalChain ready for conversational QA.
    """
    if not os.path.exists(model_path):
        hf_hub_download(
            repo_id="eachadea/ggml-vicuna-7b-1.1",
            filename=os.path.basename(model_path),
            local_dir=".",
        )
    # Physical core count is the usual sweet spot for llama.cpp, but
    # psutil.cpu_count(logical=False) can return None on some platforms —
    # fall back to os.cpu_count(), then to 1, so n_threads is never None.
    n_threads = psutil.cpu_count(logical=False) or os.cpu_count() or 1
    llm = LlamaCpp(model_path=model_path, n_ctx=2048, n_threads=n_threads)
    qa_chain = ConversationalRetrievalChain.from_llm(
        llm,
        vectorstore.as_retriever(),
    )
    return qa_chain