aegrif committed on
Commit
03e1a94
1 Parent(s): 899dce0

Upload 2 files

Browse files
Files changed (1) hide show
  1. app.py +78 -35
app.py CHANGED
@@ -3,30 +3,44 @@ import tensorflow as tf
3
  import time
4
  import warnings
5
  import os
6
- from transformers import AutoTokenizer, TFAutoModelForSequenceClassification, AutoModelForCausalLM, \
7
- TFAutoModelForQuestionAnswering, pipeline, TFT5ForConditionalGeneration
 
 
 
 
 
8
 
9
  # Warning Suppression
10
- os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
11
- warnings.filterwarnings('ignore', category=DeprecationWarning)
12
- warnings.filterwarnings('ignore', category=FutureWarning)
13
- tf.get_logger().setLevel('ERROR')
 
14
 
15
- emotion_tokenizer = AutoTokenizer.from_pretrained("aegrif/CIS6930_DAAGR_Classification")
16
- emotion_model = TFAutoModelForSequenceClassification.from_pretrained("aegrif/CIS6930_DAAGR_Classification")
 
 
17
 
18
  # generation models
19
- # encoder
20
- distilbert_model = TFAutoModelForQuestionAnswering.from_pretrained("aegrif/CIS6930_DAAGR_DistilBert")
21
- distilbert_tokenizer = AutoTokenizer.from_pretrained("aegrif/CIS6930_DAAGR_DistilBert")
 
 
 
22
 
23
  # decoder
24
- gpt2_model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small")
25
- gpt2_tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")
 
 
26
 
27
  # encoder-decoder
28
- t5_model = TFT5ForConditionalGeneration.from_pretrained("t5-small")
29
- t5_tokenizer = AutoTokenizer.from_pretrained("t5-small")
 
30
 
31
  emotion_dict = {'disappointed': 0, 'annoyed': 1, 'excited': 2, 'afraid': 3, 'disgusted': 4, 'grateful': 5,
32
  'impressed': 6, 'prepared': 7}
@@ -34,23 +48,31 @@ inverted_emotion_dict = {v: k for k, v in emotion_dict.items()}
34
 
35
 
36
  def get_context(user_input):
37
- new_user_input_ids = emotion_tokenizer.encode(user_input, return_tensors='tf')
38
- output = emotion_model.predict(new_user_input_ids)[0]
39
- prediction = tf.argmax(output, axis=1).numpy()[0]
40
- context = inverted_emotion_dict.get(prediction)
41
 
42
  return context
43
 
44
 
45
- def predict_distilbert(user_input, history):
46
- # Get the context from the user input
47
- context = get_context(user_input)
 
 
 
 
 
 
 
 
 
 
 
48
 
49
- # Generate a response using the DistilBert model
50
- question_answerer = pipeline("question-answering", model=distilbert_model, tokenizer=distilbert_tokenizer)
51
- output = question_answerer(question=user_input, context=context)
52
  # Decode the generated response
53
- bot_response = output['answer']
54
 
55
  return bot_response
56
 
@@ -59,11 +81,23 @@ def predict_gpt2(user_input, history):
59
  # Get the context from the user input
60
  context = get_context(user_input)
61
 
 
62
  # Generate a response using the DialoGPT model
63
- chatbot = pipeline("text-generation", model=gpt2_model, tokenizer=gpt2_tokenizer, pad_token_id=50256)
64
- output = chatbot(user_input + context, max_length=100, num_return_sequences=1)
 
 
 
 
 
 
 
 
 
 
 
65
  # Decode the generated response
66
- bot_response = output[0]['generated_text']
67
 
68
  return bot_response
69
 
@@ -72,9 +106,19 @@ def predict_t5(user_input, history):
72
  # Get the context from the user input
73
  context = get_context(user_input)
74
 
 
75
  # Generate a response using the T5 model
76
- chatbot = pipeline("text2text-generation", model=t5_model, tokenizer=t5_tokenizer, pad_token_id=50256)
77
- bot_response = chatbot(user_input + context, max_length=100, num_return_sequences=1)[0]['generated_text']
 
 
 
 
 
 
 
 
 
78
 
79
  return bot_response
80
 
@@ -83,14 +127,13 @@ def user(user_message, history):
83
  return "", history + [[user_message, None]]
84
 
85
 
86
- def distil_bot(history):
87
  user_message = history[-1][0]
88
- bot_message = predict_distilbert(user_message, history)
89
  history[-1][1] = bot_message
90
  time.sleep(1)
91
  return history
92
 
93
-
94
  def gpt2_bot(history):
95
  user_message = history[-1][0]
96
  bot_message = predict_gpt2(user_message, history)
@@ -120,7 +163,7 @@ with gr.Blocks() as demo:
120
  msg3 = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(container=False)
121
 
122
  msg1.submit(user, [msg1, chatbot1], [msg1, chatbot1], queue=False).then(
123
- distil_bot, chatbot1, chatbot1
124
  )
125
  msg2.submit(user, [msg2, chatbot2], [msg2, chatbot2], queue=False).then(
126
  gpt2_bot, chatbot2, chatbot2
 
3
  import time
4
  import warnings
5
  import os
6
+ from transformers import (
7
+ AutoTokenizer,
8
+ AutoModelForSeq2SeqLM,
9
+ pipeline,
10
+ AutoModelForCausalLM,
11
+ AutoModelForSequenceClassification,
12
+ )
13
 
14
  # Warning Suppression
15
+ # os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
16
+ # warnings.filterwarnings('ignore', category=DeprecationWarning)
17
+ # warnings.filterwarnings('ignore', category=FutureWarning)
18
+ # warnings.filterwarnings('ignore', category=UserWarning)
19
+ # tf.get_logger().setLevel('ERROR')
20
 
21
+ # emotion classification
22
+ emotion_tokenizer = "aegrif/CIS6930_DAAGR_Classification"
23
+ emotion_model = "aegrif/CIS6930_DAAGR_Classification"
24
+ emotion_pipeline = pipeline("text-classification", model=emotion_model, tokenizer=emotion_tokenizer)
25
 
26
  # generation models
27
+
28
+ # no emotion
29
+ gpt2_model_no_emo = "aegrif/CIS6930_DAAGR_GPT2_NoEmo"
30
+ gpt2_tokenizer_no_emo = "aegrif/CIS6930_DAAGR_GPT2_NoEmo"
31
+ chatbot_gpt_no_emo = pipeline(model=gpt2_model_no_emo, tokenizer=gpt2_tokenizer_no_emo, pad_token_id=50256)
32
+
33
 
34
  # decoder
35
+ gpt2_model_emo = "aegrif/CIS6930_DAAGR_GPT2_Emo"
36
+ gpt2_tokenizer = "aegrif/CIS6930_DAAGR_GPT2_Emo"
37
+ chatbot_gpt_emo = pipeline(model=gpt2_model_emo, tokenizer=gpt2_tokenizer, pad_token_id=50256)
38
+
39
 
40
  # encoder-decoder
41
+ t5_model_emo = "aegrif/CIS6930_DAAGR_T5_Emo"
42
+ t5_tokenizer = "t5-small"
43
+ chatbot_t5_emo = pipeline(model=t5_model_emo, tokenizer=t5_tokenizer)
44
 
45
  emotion_dict = {'disappointed': 0, 'annoyed': 1, 'excited': 2, 'afraid': 3, 'disgusted': 4, 'grateful': 5,
46
  'impressed': 6, 'prepared': 7}
 
48
 
49
 
50
  def get_context(user_input):
51
+
52
+ output = emotion_pipeline(user_input)[0]['label']
53
+
54
+ context = inverted_emotion_dict.get(int(output[-1]))
55
 
56
  return context
57
 
58
 
59
+ def predict_gpt2_no_emo(user_input, history):
60
+ user_input = "<|user|>" + user_input + " <|bot|>"
61
+
62
+ output = chatbot_gpt_no_emo(
63
+ user_input,
64
+ max_new_tokens=40,
65
+ num_return_sequences=1,
66
+ do_sample=True,
67
+ temperature=0.5,
68
+ renormalize_logits=True,
69
+ exponential_decay_length_penalty=(5, 1.1),
70
+ no_repeat_ngram_size=3,
71
+ repetition_penalty=1.5
72
+ )
73
 
 
 
 
74
  # Decode the generated response
75
+ bot_response = output[0]['generated_text'].split("<|bot|>")[1].strip()
76
 
77
  return bot_response
78
 
 
81
  # Get the context from the user input
82
  context = get_context(user_input)
83
 
84
+ user_input = f"<|context|>{context} <|user|>{user_input} <|bot|>"
85
  # Generate a response using the DialoGPT model
86
+
87
+ output = chatbot_gpt_emo(
88
+ user_input,
89
+ max_new_tokens=40,
90
+ num_return_sequences=1,
91
+ do_sample=True,
92
+ temperature=0.5,
93
+ renormalize_logits=True,
94
+ exponential_decay_length_penalty=(5, 1.1),
95
+ no_repeat_ngram_size=3,
96
+ repetition_penalty=1.5
97
+ )
98
+
99
  # Decode the generated response
100
+ bot_response = output[0]['generated_text'].split("<|bot|>")[1].strip()
101
 
102
  return bot_response
103
 
 
106
  # Get the context from the user input
107
  context = get_context(user_input)
108
 
109
+ user_input = f"question: {user_input} context: {context} </s>"
110
  # Generate a response using the T5 model
111
+ bot_response = chatbot_t5_emo(
112
+ user_input,
113
+ max_new_tokens=40,
114
+ num_return_sequences=1,
115
+ do_sample=True,
116
+ temperature=0.5,
117
+ renormalize_logits=True,
118
+ exponential_decay_length_penalty=(5, 1),
119
+ no_repeat_ngram_size=3,
120
+ repetition_penalty=1.5
121
+ )[0]['generated_text']
122
 
123
  return bot_response
124
 
 
127
  return "", history + [[user_message, None]]
128
 
129
 
130
+ def gpt2_bot_no_emo(history):
131
  user_message = history[-1][0]
132
+ bot_message = predict_gpt2_no_emo(user_message, history)
133
  history[-1][1] = bot_message
134
  time.sleep(1)
135
  return history
136
 
 
137
  def gpt2_bot(history):
138
  user_message = history[-1][0]
139
  bot_message = predict_gpt2(user_message, history)
 
163
  msg3 = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(container=False)
164
 
165
  msg1.submit(user, [msg1, chatbot1], [msg1, chatbot1], queue=False).then(
166
+ gpt2_bot_no_emo, chatbot1, chatbot1
167
  )
168
  msg2.submit(user, [msg2, chatbot2], [msg2, chatbot2], queue=False).then(
169
  gpt2_bot, chatbot2, chatbot2