adityakumar committed
Commit f92fa28
1 Parent(s): 154174a

Update app.py

Files changed (1): app.py (+1 -1)
app.py CHANGED
@@ -82,7 +82,7 @@ def main():
 
     # loading local llama model
     llm = CTransformers(#model="models/llama-2-7b-chat.ggmlv3.q8_0.bin",
-                        model="models/llama-2-7b-chat.ggmlv3.q4_0.bin",
+                        model="TheBloke/Llama-2-7B-Chat-GGML",
                         model_type="llama",
                         #callbacks=[StreamingStdOutCallbackHandler()],
                         config={'max_new_tokens': 1024,
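
The change swaps the local q4_0 GGML file for the TheBloke/Llama-2-7B-Chat-GGML Hugging Face repo id, so the weights are pulled from the Hub instead of being read from the models/ directory. A minimal sketch of the resulting loader is below, assuming LangChain's CTransformers wrapper; the model_file argument and the langchain_community import path are assumptions added for illustration, since the diff only shows the repo id, model_type, and max_new_tokens.

# Sketch of the updated loader, assuming LangChain's CTransformers wrapper.
# model_file is an assumption: it selects one quantized file from the
# Hugging Face repo; the commit itself only changes the model argument.
from langchain_community.llms import CTransformers

llm = CTransformers(
    model="TheBloke/Llama-2-7B-Chat-GGML",         # HF repo id (was a local .bin path)
    model_file="llama-2-7b-chat.ggmlv3.q4_0.bin",  # assumed: picks the q4_0 quantization
    model_type="llama",
    config={"max_new_tokens": 1024},
)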