john committed on
Commit
b96b830
1 Parent(s): 2312dd1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -1
app.py CHANGED
@@ -4,16 +4,26 @@ os.system('CMAKE_ARGS="-DLLAMA_OPENBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-
4
  import wget
5
  from llama_cpp import Llama
6
  import random
 
 
7
 
8
  def get_num_cores():
9
  """Get the number of CPU cores."""
10
  return os.cpu_count()
11
 
 
 
 
 
12
  if __name__ == "__main__":
13
  num_cores = get_num_cores()
 
 
 
 
14
  url = 'https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/resolve/main/llama-2-7b-chat.ggmlv3.q2_K.bin'
15
  filename = wget.download(url)
16
- llm2 = Llama(model_path=filename, seed=random.randint(1, 2**31), lora_path="ggml-adapter-model.bin", n_threads=num_cores)
17
  filename = wget.download(url)
18
  theme = gr.themes.Soft(
19
  primary_hue=gr.themes.Color("#ededed", "#fee2e2", "#fecaca", "#fca5a5", "#f87171", "#ef4444", "#dc2626", "#b91c1c", "#991b1b", "#7f1d1d", "#6c1e1e"),
 
4
  import wget
5
  from llama_cpp import Llama
6
  import random
7
+ import os
8
+ import multiprocessing
9
 
10
  def get_num_cores():
11
  """Get the number of CPU cores."""
12
  return os.cpu_count()
13
 
14
+ def get_num_threads():
15
+ """Get the number of threads available to the current process."""
16
+ return multiprocessing.cpu_count()
17
+
18
  if __name__ == "__main__":
19
  num_cores = get_num_cores()
20
+ num_threads = get_num_threads()
21
+
22
+ print(f"Number of CPU cores: {num_cores}")
23
+ print(f"Number of threads available to the current process: {num_threads}")
24
  url = 'https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/resolve/main/llama-2-7b-chat.ggmlv3.q2_K.bin'
25
  filename = wget.download(url)
26
+ llm2 = Llama(model_path=filename, seed=random.randint(1, 2**31), lora_path="ggml-adapter-model.bin")
27
  filename = wget.download(url)
28
  theme = gr.themes.Soft(
29
  primary_hue=gr.themes.Color("#ededed", "#fee2e2", "#fecaca", "#fca5a5", "#f87171", "#ef4444", "#dc2626", "#b91c1c", "#991b1b", "#7f1d1d", "#6c1e1e"),