Mandar Patil committed on
Commit
d4bf0a7
1 Parent(s): 2c6eb1f

initial commit

Browse files
Files changed (2) hide show
  1. 01_godelui_app.py +36 -0
  2. requirements.txt +2 -0
01_godelui_app.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
3
+
4
+ # In[ ]:
5
+
6
+
7
+ import gradio as gr
8
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
9
+
10
+ tokenizer = AutoTokenizer.from_pretrained("microsoft/GODEL-v1_1-large-seq2seq")
11
+ model = AutoModelForSeq2SeqLM.from_pretrained("microsoft/GODEL-v1_1-large-seq2seq")
12
+
13
def predict(input, knowledge, history=None):
    """Generate one empathetic reply and extend the chat history.

    Args:
        input: The user's latest utterance. (Name shadows the builtin
            ``input`` but is kept for interface compatibility.)
        knowledge: Free-text grounding passage; prefixed with the GODEL
            ``[KNOWLEDGE]`` marker before being appended to the query.
        history: List of (user, bot) utterance pairs threaded through as
            gradio state. Defaults to a fresh empty list per call.

    Returns:
        A ``(history, history)`` pair — the chatbot transcript and the
        state fed back into the next call.
    """
    # BUG FIX: the original `history=[]` mutable default was shared across
    # every call that omitted the argument, leaking turns between sessions.
    if history is None:
        history = []
    # Alternative knowledge-grounded instruction, kept for reference:
    # "Instruction: given a dialog context and related knowledge, you need
    #  to answer the question based on the knowledge."
    instruction = "Instruction: given a dialog context, you need to response empathically"
    knowledge = '[KNOWLEDGE]' + knowledge
    # Flatten [(u1, b1), (u2, b2), ...] into [u1, b1, u2, b2, ...].
    s = list(sum(history, ()))
    s.append(input)
    # GODEL expects dialog turns joined by the literal " EOS " separator.
    dialog = ' EOS '.join(s)
    query = f"{instruction} [CONTEXT] {dialog} {knowledge}"
    top_p = 0.9
    min_length = 8
    max_length = 64
    new_user_input_ids = tokenizer.encode(query, return_tensors='pt')
    print(input, s)  # debug trace of the prompt assembly
    # Nucleus sampling: output is intentionally non-deterministic.
    output = model.generate(
        new_user_input_ids,
        min_length=int(min_length),
        max_length=int(max_length),
        top_p=top_p,
        do_sample=True,
    ).tolist()
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    history.append((input, response))
    return history, history
31
+
32
# Two text inputs (utterance + knowledge) plus threaded 'state'; the
# "chatbot" output renders the accumulated history as a transcript.
demo = gr.Interface(
    fn=predict,
    inputs=["text", "text", "state"],
    outputs=["chatbot", "state"],
)
demo.launch(debug=True, share=True)
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ gradio
2
+ transformers