from langchain.llms import LlamaCpp
from langchain.chains import ConversationalRetrievalChain
from huggingface_hub import hf_hub_download
import psutil
import os

# Alternative local backend, kept for reference:
# from langchain import HuggingFacePipeline
# offload_path = "offload"


def get_chain(vectorstore):
    """Build a ConversationalRetrievalChain backed by a local Vicuna-7B model via llama.cpp."""
    # Download the quantized model weights on first run.
    if not os.path.exists("ggml-vic7b-q5_1.bin"):
        hf_hub_download(
            repo_id="eachadea/ggml-vicuna-7b-1.1",
            filename="ggml-vic7b-q5_1.bin",
            local_dir=".",
        )
    # Use one inference thread per physical core (logical=False excludes hyperthreads).
    llm = LlamaCpp(
        model_path="ggml-vic7b-q5_1.bin",
        n_ctx=2048,
        n_threads=psutil.cpu_count(logical=False),
    )
    # Alternative: a HuggingFace text2text model with disk offloading, kept for reference.
    # if not os.path.exists(offload_path):
    #     os.makedirs(offload_path)
    # llm = HuggingFacePipeline.from_model_id(
    #     model_id="lmsys/fastchat-t5-3b-v1.0",
    #     task="text2text-generation",
    #     model_kwargs={
    #         "max_length": 512,
    #         "device_map": "auto",
    #         "offload_folder": "offload",
    #     },
    # )
    qa_chain = ConversationalRetrievalChain.from_llm(
        llm,
        vectorstore.as_retriever(),
    )
    return qa_chain
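

# Minimal usage sketch (an assumption about the surrounding project, not part of the
# original module): build a tiny FAISS index from a placeholder text and ask one
# question. The embedding model, example text, and question are all illustrative;
# swap in whatever vectorstore the rest of the project actually constructs.
if __name__ == "__main__":
    from langchain.embeddings import HuggingFaceEmbeddings
    from langchain.vectorstores import FAISS

    embeddings = HuggingFaceEmbeddings()  # sentence-transformers default model
    vectorstore = FAISS.from_texts(
        ["Vicuna is a chat assistant fine-tuned from LLaMA."],
        embeddings,
    )
    chain = get_chain(vectorstore)
    # ConversationalRetrievalChain expects a question plus the prior chat history.
    result = chain({"question": "What is Vicuna?", "chat_history": []})
    print(result["answer"])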