Monster committed on
Commit
a626bed
1 Parent(s): a66cb95

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -9,9 +9,9 @@ from huggingface_hub import hf_hub_download
9
  from llama_cpp import Llama
10
  from llama_cpp import LlamaRAMCache
11
 
12
- hf_hub_download(repo_id="TheBloke/Llama-2-7B-chat-GGML", filename="llama-2-7b-chat.ggmlv3.q4_K_M.bin", local_dir=".")
13
 
14
- llm = Llama(model_path="./llama-2-7b-chat.ggmlv3.q4_K_M.bin", rms_norm_eps=1e-5)
15
 
16
  cache = LlamaRAMCache(capacity_bytes=2 << 30)
17
 
@@ -112,7 +112,7 @@ seafoam = SeafoamCustom()
112
  with gr.Blocks(theme=seafoam, analytics_enabled=False, css=css) as demo:
113
  with gr.Column():
114
  gr.Markdown(
115
- """ ## Meta's Llama 2 7B-chat GGML
116
 
117
  4bit (q4_K_M)
118
 
 
9
  from llama_cpp import Llama
10
  from llama_cpp import LlamaRAMCache
11
 
12
+ hf_hub_download(repo_id="TheBloke/Llama-2-7b-Chat-GGUF", filename="llama-2-7b-chat.Q4_K_M.gguf", local_dir=".")
13
 
14
+ llm = Llama(model_path="./llama-2-7b-chat.Q4_K_M.gguf", rms_norm_eps=1e-5)
15
 
16
  cache = LlamaRAMCache(capacity_bytes=2 << 30)
17
 
 
112
  with gr.Blocks(theme=seafoam, analytics_enabled=False, css=css) as demo:
113
  with gr.Column():
114
  gr.Markdown(
115
+ """ ## Meta's Llama 2 7B-chat
116
 
117
  4bit (q4_K_M)
118