john committed on
Commit
9891f35
1 Parent(s): ff7ccda

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -3
app.py CHANGED
@@ -21,9 +21,11 @@ if __name__ == "__main__":
21
 
22
  print(f"Number of CPU cores: {num_cores}")
23
  print(f"Number of threads available to the current process: {num_threads}")
24
- url = 'https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/resolve/main/llama-2-7b-chat.ggmlv3.q2_K.bin'
25
- filename = wget.download(url)
26
- llm2 = Llama(model_path=filename, seed=random.randint(1, 2**31), lora_path="ggml-adapter-model (1).bin", use_mlock=True, n_threads=2)
 
 
27
  filename = wget.download(url)
28
  theme = gr.themes.Soft(
29
  primary_hue=gr.themes.Color("#ededed", "#fee2e2", "#fecaca", "#fca5a5", "#f87171", "#ef4444", "#dc2626", "#b91c1c", "#991b1b", "#7f1d1d", "#6c1e1e"),
 
21
 
22
  print(f"Number of CPU cores: {num_cores}")
23
  print(f"Number of threads available to the current process: {num_threads}")
24
+ #url = 'https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/resolve/main/llama-2-7b-chat.ggmlv3.q2_K.bin'
25
+ #filename = wget.download(url)
26
+ model_path = hf_hub_download(repo_id="TheBloke/llama2-7b-chat-codeCherryPop-qLoRA-GGML", filename="llama-2-7b-chat-codeCherryPop.ggmlv3.q2_K.bin") # download model from hf; n_ctx=2048 for high context length
27
+
28
+ llm2 = Llama(model_path=model_path, seed=random.randint(1, 2**31), lora_path="ggml-adapter-model (1).bin", use_mlock=True, n_threads=2)
29
  filename = wget.download(url)
30
  theme = gr.themes.Soft(
31
  primary_hue=gr.themes.Color("#ededed", "#fee2e2", "#fecaca", "#fca5a5", "#f87171", "#ef4444", "#dc2626", "#b91c1c", "#991b1b", "#7f1d1d", "#6c1e1e"),