kingabzpro committed on
Commit
5e303ca
1 Parent(s): 03d183a

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -0
app.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import AutoModelForCausalLM, AutoTokenizer
2
+ import gradio as gr
3
+ import torch
4
+
5
+
6
# UI metadata for the Gradio interface below.
title = "🦅Falcon 🗨️ChatBot"
description ="Falcon-RW-1B is a 1B parameters causal decoder-only model built by TII and trained on 350B tokens of RefinedWeb."
# Seed example shown in the interface (list of single-input examples).
examples = [["How are you?"]]


# Load the Falcon-RW-1B tokenizer and model from the Hugging Face Hub.
# NOTE(review): this downloads ~2.6 GB of weights at import time — confirm
# the deployment environment caches the model between restarts.
tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-rw-1b")
model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-rw-1b")
14
def predict(input, history=None):
    """Generate the next chatbot reply for *input*, given the token history.

    Parameters:
        input: the new user message (plain text).  NOTE(review): the name
            shadows the builtin ``input``; kept because it is part of the
            public signature Gradio binds to.
        history: flat list of token ids for the conversation so far
            (the Gradio "state" value); ``None``/empty starts a new chat.

    Returns:
        (response, history) where ``response`` is a list of
        (user_text, bot_text) tuples for the chatbot component and
        ``history`` is the updated token-id list to carry as state.
    """
    # Fix: the original used a mutable default (history=[]), which is shared
    # across calls and would leak conversation state between sessions.
    if history is None:
        history = []

    # Tokenize the new user message, terminated with EOS so the model sees
    # a turn boundary.
    new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors='pt')

    # Append the new user tokens to the running conversation history.
    bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)

    # Generate a continuation; pad with EOS because Falcon-RW-1B has no
    # dedicated pad token.
    history = model.generate(bot_input_ids, max_length=4000, pad_token_id=tokenizer.eos_token_id).tolist()

    # Decode the whole conversation and split on the EOS marker to recover
    # the individual turns.
    response = tokenizer.decode(history[0]).split("<|endoftext|>")

    # Pair consecutive turns as (user, bot) tuples for the chatbot widget.
    response = [(response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)]
    return response, history
30

# Wire the predict function into a Gradio app: a text box plus a hidden
# "state" input carry (message, token history) in, and a chatbot widget
# plus "state" carry (turn pairs, updated history) out.  launch() starts
# the local web server (blocking).
gr.Interface(fn=predict,
             title=title,
             description=description,
             examples=examples,
             inputs=["text", "state"],
             outputs=["chatbot", "state"],
             theme='finlaymacklon/boxy_violet').launch()