Spaces: Runtime error

kingabzpro committed • Commit fe0ac3f
1 Parent(s): 07da4b7
Update app.py

app.py CHANGED

@@ -1,6 +1,7 @@
-from transformers import AutoModelForCausalLM, AutoTokenizer
 import gradio as gr
 import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
+from threading import Thread
 
 
 title = "🦅Falcon 🗨️ChatBot"
@@ -8,43 +9,54 @@ description = "Falcon-RW-1B is a 1B parameters causal decoder-only model built b
 examples = [["How are you?"]]
 
 
-tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-rw-1b")
+tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-rw-1b", torch_dtype=torch.float16)
 model = AutoModelForCausalLM.from_pretrained(
     "tiiuae/falcon-rw-1b",
     trust_remote_code=True,
+    torch_dtype=torch.float16,
 )
 
 
-… (old lines 18-43: removed code, not captured in this view)
+# Stop generation as soon as one of the hard-coded stop token ids is produced.
+class StopOnTokens(StoppingCriteria):
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
+        stop_ids = [29, 0]
+        for stop_id in stop_ids:
+            if input_ids[0][-1] == stop_id:
+                return True
+        return False
+
+def predict(message, history):
+    # Append the new user turn, with an empty bot slot, to the chat history.
+    history_transformer_format = history + [[message, ""]]
+    stop = StopOnTokens()
+
+    # Flatten the history into one "<human>:"/"<bot>:" prompt string; the
+    # trailing empty bot turn leaves the model positioned to answer.
+    messages = "".join(["".join(["\n<human>:" + item[0], "\n<bot>:" + item[1]])
+                        for item in history_transformer_format])
+
+    model_inputs = tokenizer([messages], return_tensors="pt").to("cuda")
+    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
+    generate_kwargs = dict(
+        model_inputs,
+        streamer=streamer,
+        max_new_tokens=1024,
+        do_sample=True,
+        top_p=0.95,
+        top_k=1000,
+        temperature=1.0,
+        num_beams=1,
+        stopping_criteria=StoppingCriteriaList([stop]),
+    )
+    # generate() blocks, so run it on a worker thread and stream tokens back.
+    t = Thread(target=model.generate, kwargs=generate_kwargs)
+    t.start()
+
+    partial_message = ""
+    for new_token in streamer:
+        if new_token != '<':  # skip the "<" that opens a "<human>:"/"<bot>:" tag
+            partial_message += new_token
+            yield partial_message
+
+
+gr.ChatInterface(predict,
                  title=title,
                  description=description,
-                 examples=examples
-                 inputs=["text", "state"],
-                 outputs=["chatbot", "state"],
-                 theme="finlaymacklon/boxy_violet",
-).launch()
+                 examples=examples).queue().launch()
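A note on the prompt format: `predict` flattens Gradio's `[user, assistant]` history pairs into one plain-text prompt, ending with an empty `<bot>:` turn so the model continues as the assistant. A minimal standalone sketch of that formatting, with made-up history values:

```python
# Sketch of the prompt string predict() builds; the history values
# below are hypothetical, the format is the one used in app.py.
history = [["How are you?", "I am fine, thanks."]]
message = "Tell me about Falcon."

history_transformer_format = history + [[message, ""]]
prompt = "".join(
    "\n<human>:" + user + "\n<bot>:" + bot
    for user, bot in history_transformer_format
)
print(prompt)
# <human>:How are you?
# <bot>:I am fine, thanks.
# <human>:Tell me about Falcon.
# <bot>:
```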
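The hard-coded `stop_ids = [29, 0]` in `StopOnTokens` appear to be carried over from a different model's tokenizer (this predict loop closely follows a common Gradio streaming-chatbot example), so it is worth checking what those ids mean for `tiiuae/falcon-rw-1b`. A quick check, with the caveat that which tokens should end generation is an assumption here:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("tiiuae/falcon-rw-1b")

# What do the hard-coded ids decode to for this tokenizer?
print(repr(tok.decode([29])))
print(repr(tok.decode([0])))

# More robust: derive ids from the tokens you actually want to stop on,
# e.g. this model's own end-of-text token.
print(tok.eos_token_id)
```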
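The core mechanism in the new `predict` is streaming: `model.generate()` blocks until generation finishes, so it runs on a worker thread while the caller iterates a `TextIteratorStreamer` and yields progressively longer strings to Gradio. A minimal sketch of just that pattern with the same model (small `max_new_tokens`, purely illustrative):

```python
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-rw-1b")
model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-rw-1b", trust_remote_code=True)

inputs = tokenizer(["\n<human>:How are you?\n<bot>:"], return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

# generate() blocks, so it runs on a worker thread; the streamer is fed
# from that thread and consumed here as an iterator of decoded text chunks.
Thread(target=model.generate,
       kwargs=dict(inputs, streamer=streamer, max_new_tokens=32)).start()

text = ""
for chunk in streamer:
    text += chunk
print(text)
```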
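Two caveats in the committed version: `torch_dtype` passed to `AutoTokenizer.from_pretrained` is silently ignored (it is a model argument), and `model_inputs` is moved to `"cuda"` while the model itself never is, so `generate` fails on mismatched devices; on hardware with no GPU, `.to("cuda")` raises outright, which may be the source of the runtime error shown above. A hedged, device-agnostic sketch:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Device/dtype-agnostic setup (assumption: float16 only makes sense on GPU).
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-rw-1b")
model = AutoModelForCausalLM.from_pretrained(
    "tiiuae/falcon-rw-1b",
    trust_remote_code=True,
    torch_dtype=dtype,
).to(device)

# and inside predict():
# model_inputs = tokenizer([messages], return_tensors="pt").to(device)
```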