YaTharThShaRma999 committed on
Commit
fb37c8a
1 Parent(s): cf65870

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -25,9 +25,9 @@ if __name__ == "__main__":
25
  print(f"Number of threads available to the current process: {num_threads}")
26
  #url = 'https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/resolve/main/llama-2-7b-chat.ggmlv3.q2_K.bin'
27
  #filename = wget.download(url)
28
- model_path= hf_hub_download(repo_id="TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF", filename="tinyllama-1.1b-chat-v1.0.Q4_0.gguf")
29
 
30
- llm2 = Llama(model_path=model_path, seed=random.randint(1, 2**31), use_mlock=False, n_threads=4)
31
  theme = gr.themes.Soft(
32
  primary_hue=gr.themes.Color("#ededed", "#fee2e2", "#fecaca", "#fca5a5", "#f87171", "#ef4444", "#dc2626", "#b91c1c", "#991b1b", "#7f1d1d", "#6c1e1e"),
33
  neutral_hue="red",
 
25
  print(f"Number of threads available to the current process: {num_threads}")
26
  #url = 'https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/resolve/main/llama-2-7b-chat.ggmlv3.q2_K.bin'
27
  #filename = wget.download(url)
28
+ model_path= hf_hub_download(repo_id="brittlewis12/Octopus-v2-GGUF", filename="octopus-v2.Q4_K_S.gguf")
29
 
30
+ llm2 = Llama(model_path=model_path, use_mlock=False)
31
  theme = gr.themes.Soft(
32
  primary_hue=gr.themes.Color("#ededed", "#fee2e2", "#fecaca", "#fca5a5", "#f87171", "#ef4444", "#dc2626", "#b91c1c", "#991b1b", "#7f1d1d", "#6c1e1e"),
33
  neutral_hue="red",