Update app.py
app.py CHANGED

@@ -4,7 +4,6 @@ import gradio as gr
 from huggingface_hub import hf_hub_download
 from llama_cpp import Llama
 
-# You can swap these two later to test a different model:
 REPO_ID = "bartowski/SmolLM2-135M-Instruct-GGUF"
 FILENAME = "SmolLM2-135M-Instruct-Q4_K_M.gguf"
 
@@ -16,13 +15,12 @@ def load_llm():
         local_dir=".",
         local_dir_use_symlinks=False,
     )
-    # Speed changes: lower n_ctx and added n_batch
     llm = Llama(
         model_path=model_path,
-        n_ctx=
+        n_ctx=512,
         n_threads=max(2, os.cpu_count() or 2),
         n_gpu_layers=0,
-        n_batch=
+        n_batch=32,
         verbose=False,
     )
     return llm
@@ -43,8 +41,8 @@ def respond(message, history):
     # Speed changes: max_tokens and temperature
     stream = llm.create_completion(
         prompt=prompt,
-        max_tokens=120,
-        temperature=0.5,
+        max_tokens=120,
+        temperature=0.5,
         top_p=0.9,
         stop=["[/ASSISTANT]", "[USER]", "\n[USER]"],
         stream=True,
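
Taken together, the two hunks amount to roughly the sketch below. It is reconstructed from the diff, not the full file: the prompt string built in respond, the module-level `llm = load_llm()` wiring, and the Gradio plumbing are assumptions, since only the changed lines appear above.

```python
import os

from huggingface_hub import hf_hub_download
from llama_cpp import Llama

REPO_ID = "bartowski/SmolLM2-135M-Instruct-GGUF"
FILENAME = "SmolLM2-135M-Instruct-Q4_K_M.gguf"


def load_llm():
    # Fetch the GGUF file into the working directory (cached after the first run).
    model_path = hf_hub_download(
        repo_id=REPO_ID,
        filename=FILENAME,
        local_dir=".",
        local_dir_use_symlinks=False,
    )
    # CPU-only load; the smaller context window (512) and explicit batch size (32)
    # reduce memory use and prompt-processing latency on CPU-only hardware.
    return Llama(
        model_path=model_path,
        n_ctx=512,
        n_threads=max(2, os.cpu_count() or 2),
        n_gpu_layers=0,
        n_batch=32,
        verbose=False,
    )


llm = load_llm()


def respond(message, history):
    # Hypothetical prompt assembly; only the stop strings appear in the diff.
    prompt = f"[USER] {message} [/USER]\n[ASSISTANT]"
    stream = llm.create_completion(
        prompt=prompt,
        max_tokens=120,
        temperature=0.5,
        top_p=0.9,
        stop=["[/ASSISTANT]", "[USER]", "\n[USER]"],
        stream=True,
    )
    partial = ""
    for chunk in stream:
        # Each streamed chunk carries the next generated text fragment.
        partial += chunk["choices"][0]["text"]
        yield partial
```

With n_ctx=512, the prompt plus the up-to-120 generated tokens must fit within 512 tokens, so long chat histories would need truncation before being folded into the prompt.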
|