john committed on
Commit
a534500
1 Parent(s): 9891f35

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -23,7 +23,7 @@ if __name__ == "__main__":
23
  print(f"Number of threads available to the current process: {num_threads}")
24
  #url = 'https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/resolve/main/llama-2-7b-chat.ggmlv3.q2_K.bin'
25
  #filename = wget.download(url)
26
- model_path= hf_hub_download(repo_id="TheBloke/llama2-7b-chat-codeCherryPop-qLoRA-GGML", filename="llama-2-7b-chat-codeCherryPop.ggmlv3.q2_K.bin"), n_ctx=2048, n_threads=2) #download model from hf/ n_ctx=2048 for high ccontext length
27
 
28
  llm2 = Llama(model_path=model_path, seed=random.randint(1, 2**31), lora_path="ggml-adapter-model (1).bin", use_mlock=True, n_threads=2)
29
  filename = wget.download(url)
 
23
  print(f"Number of threads available to the current process: {num_threads}")
24
  #url = 'https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/resolve/main/llama-2-7b-chat.ggmlv3.q2_K.bin'
25
  #filename = wget.download(url)
26
+ model_path= hf_hub_download(repo_id="TheBloke/llama2-7b-chat-codeCherryPop-qLoRA-GGML", filename="llama-2-7b-chat-codeCherryPop.ggmlv3.q2_K.bin")
27
 
28
  llm2 = Llama(model_path=model_path, seed=random.randint(1, 2**31), lora_path="ggml-adapter-model (1).bin", use_mlock=True, n_threads=2)
29
  filename = wget.download(url)