Spaces: Running on Zero

macadeliccc committed
Commit f7d8c6a • 1 Parent(s): ce3745c

Update app.py

app.py CHANGED
@@ -1,58 +1,82 @@
 import spaces
 import gradio as gr
 import torch
-from …
-from transformers import …

-# …
-device = "cuda" if torch.cuda.is_available() else "cpu"

-# …
-[old lines 11–13 removed; content truncated in the extraction]

 @spaces.GPU
-def …
-[old lines 17–58 removed; content truncated in the extraction]
 import spaces
 import gradio as gr
 import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
+from threading import Thread

+# Lazy-load the model to meet Hugging Face's stateless GPU (ZeroGPU) requirements.

+# Custom stopping-criteria class for the model's text generation.
+class StopOnTokens(StoppingCriteria):
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
+        stop_ids = [50256, 50295]  # IDs of tokens where the generation should stop.
+        for stop_id in stop_ids:
+            if input_ids[0][-1] == stop_id:  # Check if the last generated token is a stop token.
+                return True
+        return False

+
+# Function to generate model predictions.
 @spaces.GPU
+def predict(message, history):
+    torch.set_default_device("cuda")
+
+    # Loading the tokenizer and model from Hugging Face's model hub.
+    tokenizer = AutoTokenizer.from_pretrained(
+        "macadeliccc/laser-dolphin-mixtral-2x7b-dpo",
+        trust_remote_code=True
+    )
+    model = AutoModelForCausalLM.from_pretrained(
+        "macadeliccc/laser-dolphin-mixtral-2x7b-dpo",
+        torch_dtype="auto",
+        load_in_4bit=True,
+        trust_remote_code=True
+    )
+    history_transformer_format = history + [[message, ""]]
+    stop = StopOnTokens()
+
+    # Formatting the input for the model.
+    system_prompt = "<|im_start|>system\nYou are Dolphin, a helpful AI assistant.<|im_end|>"
+    messages = system_prompt + "".join(["".join(["\n<|im_start|>user\n" + item[0], "<|im_end|>\n<|im_start|>assistant\n" + item[1]]) for item in history_transformer_format])
+    input_ids = tokenizer([messages], return_tensors="pt").to('cuda')
+    streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
+    generate_kwargs = dict(
+        input_ids,
+        streamer=streamer,
+        max_new_tokens=1024,
+        do_sample=True,
+        top_p=0.95,
+        top_k=50,
+        temperature=0.7,
+        num_beams=1,
+        stopping_criteria=StoppingCriteriaList([stop])
+    )
+    t = Thread(target=model.generate, kwargs=generate_kwargs)
+    t.start()  # Starting the generation in a separate thread.
+    partial_message = ""
+    for new_token in streamer:
+        partial_message += new_token
+        if '<|im_end|>' in partial_message:  # Breaking the loop if the stop token is generated.
+            break
+        yield partial_message
+
+
+# Setting up the Gradio chat interface.
+gr.ChatInterface(predict,
+    description="""
+    <center><img src="https://huggingface.co/macadeliccc/laser-dolphin-mixtral-2x7b-dpo/resolve/main/dolphin_moe.png" width="33%"></center>\n\n
+    Chat with [macadeliccc/laser-dolphin-mixtral-2x7b-dpo](https://huggingface.co/macadeliccc/laser-dolphin-mixtral-2x7b-dpo), a 2x7B Dolphin Mixture of Experts model. Output is considered experimental.\n\n
+    ❤️ If you like this work, please follow me on [Hugging Face](https://huggingface.co/macadeliccc) and [LinkedIn](https://www.linkedin.com/in/tim-dolan-python-dev/).
+    """,
+    examples=[
+        'Can you solve the equation 2x + 3 = 11 for x?',
+        "How does Fermat's last theorem impact number theory?",
+        'What is a vector in the scope of computer science rather than physics?',
+        'Use a list comprehension to create a list of squares for numbers from 1 to 10.',
+        'Recommend some popular science fiction books.',
+        'Can you write a short story about a time-traveling detective?'
+    ],
+    theme=gr.themes.Soft(primary_hue="purple"),
+).launch()
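
A note on the StopOnTokens class added in this commit: generation halts as soon as the last sampled token matches one of the hard-coded stop IDs (50256, 50295). A minimal sketch of that logic in isolation, runnable on CPU without loading the model (the token ID tensors below are made up for illustration):

import torch
from transformers import StoppingCriteria

class StopOnTokens(StoppingCriteria):
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        stop_ids = [50256, 50295]  # same hard-coded stop token IDs as in app.py
        for stop_id in stop_ids:
            if input_ids[0][-1] == stop_id:  # last generated token is a stop token
                return True
        return False

criteria = StopOnTokens()
print(criteria(torch.tensor([[15496, 50256]]), None))  # True: sequence ends in a stop ID
print(criteria(torch.tensor([[15496, 11]]), None))     # False: generation continues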
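The prompt assembly in predict() flattens the Gradio history into a single ChatML-style string. A small sketch of just that step, equivalent to the join expression in the diff (the two-turn history is hypothetical), showing the exact string the model receives:

system_prompt = "<|im_start|>system\nYou are Dolphin, a helpful AI assistant.<|im_end|>"
history = [["Hi!", "Hello! How can I help you today?"]]  # one completed prior turn
message = "Can you solve the equation 2x + 3 = 11 for x?"

history_transformer_format = history + [[message, ""]]  # current turn; assistant reply still empty
messages = system_prompt + "".join(
    "\n<|im_start|>user\n" + user + "<|im_end|>\n<|im_start|>assistant\n" + assistant
    for user, assistant in history_transformer_format
)
print(messages)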
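The Thread-plus-TextIteratorStreamer pattern is what lets predict() yield partial text to Gradio while model.generate() is still running: generate() pushes decoded tokens into the streamer from the worker thread, and the main thread consumes them as an iterator. The same pattern with a small stand-in model (sshleifer/tiny-gpt2 is an assumption here, chosen only so the sketch runs quickly on CPU; its output is gibberish):

from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2")
model = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2")

inputs = tokenizer(["Hello"], return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
generate_kwargs = dict(inputs, streamer=streamer, max_new_tokens=20, do_sample=True)

Thread(target=model.generate, kwargs=generate_kwargs).start()  # generation runs off the main thread
partial_message = ""
for new_token in streamer:  # blocks until the next decoded chunk arrives
    partial_message += new_token
print(partial_message)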