Spaces:
Runtime error
Runtime error
Update gradio_app.py
Browse files — gradio_app.py (+2 −2)
gradio_app.py
CHANGED
|
@@ -7,13 +7,13 @@ import psutil
|
|
| 7 |
# Initing things
|
| 8 |
print("! INITING LLAMA MODEL !")
|
| 9 |
llm = Llama(model_path="./model.bin") # LLaMa model
|
| 10 |
-
llama_model_name = "
|
| 11 |
print("! INITING DONE !")
|
| 12 |
|
| 13 |
# Preparing things to work
|
| 14 |
title = "llama.cpp API"
|
| 15 |
desc = '''<h1>Hello, world!</h1>
|
| 16 |
-
This is showcase how to make own server with Llama2 model.<br>
|
| 17 |
I'm using here 7b model just for example. Also here's only CPU power.<br>
|
| 18 |
But you can use GPU power as well!<br><br>
|
| 19 |
<h1>How to GPU?</h1>
|
|
|
|
| 7 |
# Initing things
|
| 8 |
print("! INITING LLAMA MODEL !")
|
| 9 |
llm = Llama(model_path="./model.bin") # LLaMa model
|
| 10 |
+
llama_model_name = "NousResearch/Hermes-2-Pro-Mistral-7B-GGUF" # This is just for indication in "three dots menu"
|
| 11 |
print("! INITING DONE !")
|
| 12 |
|
| 13 |
# Preparing things to work
|
| 14 |
title = "llama.cpp API"
|
| 15 |
desc = '''<h1>Hello, world!</h1>
|
| 16 |
+
This is showcase how to make own server with Llama2 model using llama_cpp.<br>
|
| 17 |
I'm using here 7b model just for example. Also here's only CPU power.<br>
|
| 18 |
But you can use GPU power as well!<br><br>
|
| 19 |
<h1>How to GPU?</h1>
|