kingabzpro committed on
Commit
fe0ac3f
1 Parent(s): 07da4b7

Update app.py

Files changed (1)
app.py  +45 -33
app.py CHANGED
@@ -1,6 +1,7 @@
-from transformers import AutoModelForCausalLM, AutoTokenizer
 import gradio as gr
 import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
+from threading import Thread
 
 
 title = "🦅Falcon 🗨️ChatBot"
@@ -8,43 +9,54 @@ description = "Falcon-RW-1B is a 1B parameters causal decoder-only model built b
 examples = [["How are you?"]]
 
 
-tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-rw-1b")
+tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-rw-1b", torch_dtype=torch.float16)  # note: torch_dtype is ignored by tokenizers; only the model uses it
 model = AutoModelForCausalLM.from_pretrained(
     "tiiuae/falcon-rw-1b",
     trust_remote_code=True,
+    torch_dtype=torch.float16,
 )
 
 
-def predict(input, history=[]):
-    # tokenize the new input sentence
-    new_user_input_ids = tokenizer.encode(
-        input + tokenizer.eos_token, return_tensors="pt"
-    )
-
-    # append the new user input tokens to the chat history
-    bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
-
-    # generate a response
-    history = model.generate(
-        bot_input_ids, max_length=4000, pad_token_id=tokenizer.eos_token_id
-    ).tolist()
-
-    # convert the tokens to text, and then split the responses into lines
-    response = tokenizer.decode(history[0]).split("<|endoftext|>")
-    # print('decoded_response-->>'+str(response))
-    response = [
-        (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)
-    ]  # convert to tuples of list
-    # print('response-->>'+str(response))
-    return response, history
-
-
-gr.Interface(
-    fn=predict,
+class StopOnTokens(StoppingCriteria):
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
+        stop_ids = [29, 0]  # token ids treated as end-of-turn markers (model-specific)
+        for stop_id in stop_ids:
+            if input_ids[0][-1] == stop_id:
+                return True
+        return False
+
+
+def predict(message, history):
+    history_transformer_format = history + [[message, ""]]
+    stop = StopOnTokens()
+
+    messages = "".join(["".join(["\n<human>:" + item[0], "\n<bot>:" + item[1]])  # flatten history into the <human>/<bot> prompt format
+                        for item in history_transformer_format])
+
+    model_inputs = tokenizer([messages], return_tensors="pt").to("cuda")
+    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
+    generate_kwargs = dict(
+        model_inputs,
+        streamer=streamer,
+        max_new_tokens=1024,
+        do_sample=True,
+        top_p=0.95,
+        top_k=1000,
+        temperature=1.0,
+        num_beams=1,
+        stopping_criteria=StoppingCriteriaList([stop]),
+    )
+    t = Thread(target=model.generate, kwargs=generate_kwargs)  # generate in a background thread so tokens can stream
+    t.start()
+
+    partial_message = ""
+    for new_token in streamer:
+        if new_token != '<':  # skip the start of the next <human>/<bot> marker
+            partial_message += new_token
+            yield partial_message
+
+
+gr.ChatInterface(fn=predict,
     title=title,
     description=description,
-    examples=examples,
-    inputs=["text", "state"],
-    outputs=["chatbot", "state"],
-    theme="finlaymacklon/boxy_violet",
-).launch()
+    examples=examples).queue().launch()
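
Note: the core of this change is a threaded streaming pattern. model.generate() runs on a background thread and feeds a TextIteratorStreamer, while predict() is a generator that yields the growing reply; gr.ChatInterface re-renders the pending message on every yield, producing the token-by-token typing effect. Below is a minimal, self-contained sketch of that pattern outside Gradio; the model ("gpt2"), the prompt, and the eos-based stopping rule are illustrative assumptions, not part of this commit.

# Minimal sketch of the commit's threaded streaming pattern.
# The model ("gpt2"), prompt, and generation settings are illustrative
# assumptions; only the structure mirrors the new predict().
from threading import Thread

from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    StoppingCriteria,
    StoppingCriteriaList,
    TextIteratorStreamer,
)

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")


class StopOnEos(StoppingCriteria):
    # Analogue of the commit's StopOnTokens; its hard-coded ids [29, 0] are
    # model-specific, so this sketch stops on the tokenizer's own eos token.
    def __call__(self, input_ids, scores, **kwargs) -> bool:
        return input_ids[0][-1].item() == tokenizer.eos_token_id


model_inputs = tokenizer(["\n<human>:How are you?\n<bot>:"], return_tensors="pt")

# TextIteratorStreamer is an iterator: generate() pushes decoded text into it
# from the background thread while the main thread consumes it piece by piece.
streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
generate_kwargs = dict(model_inputs, streamer=streamer, max_new_tokens=32,
                       stopping_criteria=StoppingCriteriaList([StopOnEos()]))
Thread(target=model.generate, kwargs=generate_kwargs).start()

partial_message = ""
for new_token in streamer:
    partial_message += new_token
    print(partial_message)  # the Gradio app does `yield partial_message` here

The same generator shape is why the app calls .queue() on the interface: streaming responses in Gradio require the queue so partial outputs can be pushed to the client as they are produced.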