Update model.py
model.py
CHANGED
@@ -3,15 +3,15 @@ from typing import Iterator
 
 
 
-model_id = '
+model_id = 'theohlong/baichuan2_13b-GGML'
 
 from huggingface_hub import snapshot_download,hf_hub_download
 #旧
 #snapshot_download(model_id, local_dir="./",revision="7f71a8abefa7b2eede3f74ce0564abe5fbe6874a")
-snapshot_download(model_id, local_dir="./",revision="b2414a0ceee68fe09c99ace44446cfc9a1c52b08")
+#snapshot_download(model_id, local_dir="./",revision="b2414a0ceee68fe09c99ace44446cfc9a1c52b08")
 hf_hub_download(repo_id="baichuan-inc/Baichuan-13B-Chat",local_dir="./", filename="tokenizer.model")
 from llama_cpp import Llama
-llm = Llama(model_path="./
+llm = Llama(model_path="./ggml-model-q4_1.bin", n_ctx=4096,seed=-1)
 
 def run(message: str,
         chat_history: list[tuple[str, str]],
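The change swaps the full snapshot download for a single GGML checkpoint loaded through llama-cpp-python. As a rough orientation only, below is a minimal sketch of how the resulting `llm` object could back the streaming `run` generator whose signature closes the hunk. The prompt layout is a generic placeholder (not Baichuan2's real chat template), and the extra parameters and generation settings are illustrative assumptions, not the Space's actual code.

# Minimal usage sketch, not the Space's actual run() body.
from typing import Iterator

from llama_cpp import Llama

llm = Llama(model_path="./ggml-model-q4_1.bin", n_ctx=4096, seed=-1)

def run(message: str,
        chat_history: list[tuple[str, str]],
        max_new_tokens: int = 1024,          # assumed parameter for illustration
        temperature: float = 0.3) -> Iterator[str]:
    # Fold prior turns plus the new user message into one prompt string.
    # This "User:/Assistant:" layout is a placeholder, not Baichuan2's chat template.
    prompt = ""
    for user_turn, assistant_turn in chat_history:
        prompt += f"User: {user_turn}\nAssistant: {assistant_turn}\n"
    prompt += f"User: {message}\nAssistant:"

    # With stream=True, llama-cpp-python yields OpenAI-style completion chunks.
    partial = ""
    for chunk in llm(prompt,
                     max_tokens=max_new_tokens,
                     temperature=temperature,
                     stream=True):
        partial += chunk["choices"][0]["text"]
        yield partial

Yielding the accumulated text on every chunk lets the caller render incremental updates while generation is still running.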