Update app.py

app.py CHANGED
@@ -5,22 +5,23 @@ from huggingface_hub import hf_hub_download
 title = "Mistral-7B-Instruct-GGUF Run On CPU-Basic Free Hardware"
 
 description = """
-π [Mistral AI's Mistral 7B Instruct v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) [GGUF format model](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF)
+π [Mistral AI's Mistral 7B Instruct v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) [GGUF format model](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF), 4-bit quantization balanced quality GGUF version, running on CPU. English Only (supports other languages but with reduced quality). Using [GitHub - llama.cpp](https://github.com/ggerganov/llama.cpp) and [GitHub - gpt4all](https://github.com/nomic-ai/gpt4all).
 π¨ Running on CPU-Basic free hardware. Suggest duplicating this space to run without a queue.
-Mistral does not support system prompt
-"""
-
-"""
-[Model From TheBloke/Mistral-7B-Instruct-v0.1-GGUF](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF)
-[Mistral-instruct-v0.1 System prompt](https://docs.mistral.ai/usage/guardrailing)
+Mistral does not support system prompt symbols (such as `<<SYS>>`) now, input your system prompt in the first message if needed. Learn more: [Guardrailing Mistral 7B](https://docs.mistral.ai/usage/guardrailing).
 """
 
 model_path = "models"
 model_name = "unsloth.Q4_K_M.gguf"
-
+
+# Download the model from Hugging Face
+hf_hub_download(
+    repo_id="vislupus/bulgarian-joke-master-gemma-2-2b-it-bnb-4bit-gguf",
+    filename=model_name,
+    local_dir=model_path
+)
 
 print("Start the model init process")
-model =
+model = GPT4All(model_name, model_path)
 print("Finish the model init process")
 
 model.config["promptTemplate"] = "[INST] {0} [/INST]"
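Note on this hunk: the commit moves the Space to a download-then-load flow, fetching a GGUF file from the Hub with hf_hub_download and loading it with the gpt4all Python bindings. A minimal standalone sketch of that flow, assuming the huggingface_hub and gpt4all packages are installed (the repo_id and filename simply mirror the diff):

from huggingface_hub import hf_hub_download
from gpt4all import GPT4All

model_path = "models"
model_name = "unsloth.Q4_K_M.gguf"

# Fetch the GGUF file from the Hub into the local "models" directory
hf_hub_download(
    repo_id="vislupus/bulgarian-joke-master-gemma-2-2b-it-bnb-4bit-gguf",
    filename=model_name,
    local_dir=model_path,
)

# Load the downloaded file with the gpt4all bindings (CPU inference)
model = GPT4All(model_name, model_path)

# Quick smoke test
print(model.generate("Hello!", max_tokens=32))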
@@ -36,7 +37,7 @@ def generater(message, history, temperature, top_p, top_k):
         prompt += assistant_message + "</s>"
     prompt += model.config["promptTemplate"].format(message)
     outputs = []
-    for token in model.generate(prompt=prompt, temp=temperature, top_k
+    for token in model.generate(prompt=prompt, temp=temperature, top_k=top_k, top_p=top_p, max_tokens=max_new_tokens, streaming=True):
         outputs.append(token)
         yield "".join(outputs)
 
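The hunk above only shows the tail of generater(). One plausible shape of the full function, assuming history arrives as the (user, assistant) tuple list that gr.ChatInterface passes by default and that max_new_tokens is a module-level constant defined elsewhere in app.py (its value is not visible in this diff):

max_new_tokens = 1000  # placeholder; the real value is defined elsewhere in app.py

def generater(message, history, temperature, top_p, top_k):
    # Rebuild the Mistral [INST] ... [/INST] conversation from the chat history
    prompt = ""
    for user_message, assistant_message in history:
        prompt += model.config["promptTemplate"].format(user_message)
        prompt += assistant_message + "</s>"
    prompt += model.config["promptTemplate"].format(message)

    outputs = []
    # streaming=True makes the gpt4all bindings yield tokens one at a time
    for token in model.generate(prompt=prompt, temp=temperature, top_k=top_k,
                                top_p=top_p, max_tokens=max_new_tokens, streaming=True):
        outputs.append(token)
        # Yielding the growing string lets gr.ChatInterface stream the reply
        yield "".join(outputs)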
@@ -46,9 +47,9 @@ def vote(data: gr.LikeData):
     else:
         return
 
-chatbot = gr.Chatbot(avatar_images=('resourse/user-icon.png', 'resourse/chatbot-icon.png'),bubble_full_width
+chatbot = gr.Chatbot(avatar_images=('resourse/user-icon.png', 'resourse/chatbot-icon.png'), bubble_full_width=False)
 
-additional_inputs=[
+additional_inputs = [
     gr.Slider(
         label="temperature",
         value=0.5,
@@ -65,7 +66,7 @@ additional_inputs=[
         maximum=1.0,
         step=0.01,
         interactive=True,
-        info="0.1 means only the tokens comprising the top 10% probability mass are considered. Suggest set to 1 and use temperature. 1 means 100% and will disable it",
+        info="0.1 means only the tokens comprising the top 10% probability mass are considered. Suggest set to 1 and use temperature. 1 means 100% and will disable it.",
     ),
     gr.Slider(
         label="top_k",
@@ -74,7 +75,7 @@ additional_inputs=[
         maximum=1000,
         step=1,
         interactive=True,
-        info="
+        info="Limits candidate tokens to a fixed number after sorting by probability. Setting it higher than the vocabulary size deactivates this limit.",
     )
 ]
 
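These sliders do more than display values: gr.ChatInterface forwards each component in additional_inputs, in order, as an extra argument to fn, which is how generater(message, history, temperature, top_p, top_k) receives them. A condensed sketch of that mapping; defaults and minimums not visible in the hunks are placeholders, not values from the diff:

import gradio as gr

additional_inputs = [
    gr.Slider(label="temperature", value=0.5, minimum=0.05, maximum=2.0, step=0.05,
              interactive=True),   # bounds here are placeholders
    gr.Slider(label="top_p", value=1.0, minimum=0.0, maximum=1.0, step=0.01,
              interactive=True),   # default/minimum are placeholders
    gr.Slider(label="top_k", value=40, minimum=1, maximum=1000, step=1,
              interactive=True),   # default/minimum are placeholders
]

# gr.ChatInterface(fn=generater, additional_inputs=additional_inputs, ...) then calls
# generater(message, history, temperature, top_p, top_k) with the current slider values.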
@@ -82,9 +83,9 @@ character = "Sherlock Holmes"
 series = "Arthur Conan Doyle's novel"
 
 iface = gr.ChatInterface(
-    fn
+    fn=generater,
     title=title,
-    description
+    description=description,
     chatbot=chatbot,
     additional_inputs=additional_inputs,
     examples=[
@@ -101,4 +102,4 @@ with gr.Blocks(css="resourse/style/custom.css") as demo:
     iface.render()
 
 if __name__ == "__main__":
-    demo.queue(max_size=3).launch()
+    demo.queue(max_size=3).launch()
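Bottom of app.py for reference: the ChatInterface is rendered inside a gr.Blocks so the Space's custom CSS applies, and the request queue is capped for the free CPU hardware. A condensed sketch, with the examples list and the character/series templating omitted:

import gradio as gr

iface = gr.ChatInterface(
    fn=generater,                        # the streaming generator defined above
    title=title,
    description=description,
    chatbot=chatbot,                     # the gr.Chatbot with custom avatars
    additional_inputs=additional_inputs,
)

# Wrap the interface in a Blocks so the custom stylesheet applies
with gr.Blocks(css="resourse/style/custom.css") as demo:
    iface.render()

if __name__ == "__main__":
    # max_size=3 caps how many requests can wait in the queue on CPU-Basic hardware
    demo.queue(max_size=3).launch()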