Minh Q. Le committed on
Commit
133dc65
•
1 Parent(s): 2958d01

Added DeBERTa model from previous semester

best_model_fold2.h5 → Model/DeBERTa/best_model_fold2.h5 RENAMED
File without changes
Model/DeBERTa/deberta.py ADDED
@@ -0,0 +1,233 @@
+ import numpy as np
+ import tensorflow as tf
+ import tensorflow_addons as tfa
+ from tensorflow.keras import layers
+ import transformers
+ import os
+
+ MAX_LENGTH = 512  # the maximum number of tokens per input
+ BATCH_SIZE = 8  # number of messages processed at a time
+
+
+ class MeanPool(tf.keras.layers.Layer):
+     def call(self, inputs, mask=None):
+         broadcast_mask = tf.expand_dims(tf.cast(mask, "float32"), -1)
+         embedding_sum = tf.reduce_sum(inputs * broadcast_mask, axis=1)
+         mask_sum = tf.reduce_sum(broadcast_mask, axis=1)
+         mask_sum = tf.math.maximum(mask_sum, tf.constant([1e-9]))
+         return embedding_sum / mask_sum
+
+
+ class WeightsSumOne(tf.keras.constraints.Constraint):
+     def __call__(self, w):
+         return tf.nn.softmax(w, axis=0)
+
+
+ def deberta_init(
+     pretrained_model_name: str = "microsoft/deberta-v3-large", tokenizer_dir: str = "."
+ ):
+     """Helper function to quickly initialize the config and tokenizer for a model
+
+     Args:
+         pretrained_model_name (str, optional): The model name. Defaults to "microsoft/deberta-v3-large".
+         tokenizer_dir (str, optional): Directory of the tokenizer. Defaults to ".".
+
+     Returns:
+         The configuration and tokenizer of the model.
+     """
+     tokenizer = transformers.AutoTokenizer.from_pretrained(pretrained_model_name)
+     tokenizer_path = os.path.join(tokenizer_dir, "tokenizer")
+     tokenizer.save_pretrained(tokenizer_path)
+
+     cfg = transformers.AutoConfig.from_pretrained(
+         pretrained_model_name, output_hidden_states=True
+     )
+     cfg.hidden_dropout_prob = 0
+     cfg.attention_probs_dropout_prob = 0
+     cfg.save_pretrained(tokenizer_path)
+     return cfg, tokenizer
+
+
+ def get_model(cfg):
+     """Get a DeBERTa model using the specified configuration
+
+     Args:
+         cfg : the configuration of the model (can be generated using deberta_init)
+
+     Returns:
+         The model with respect to the given configuration.
+     """
+     input_ids = tf.keras.layers.Input(
+         shape=(MAX_LENGTH,), dtype=tf.int32, name="input_ids"
+     )
+
+     attention_masks = tf.keras.layers.Input(
+         shape=(MAX_LENGTH,), dtype=tf.int32, name="attention_masks"
+     )
+
+     deberta_model = transformers.TFAutoModel.from_pretrained(
+         "microsoft/deberta-v3-large", config=cfg
+     )
+
+     REINIT_LAYERS = 1
+     normal_initializer = tf.keras.initializers.GlorotUniform()
+     zeros_initializer = tf.keras.initializers.Zeros()
+     ones_initializer = tf.keras.initializers.Ones()
+
+     for encoder_block in deberta_model.deberta.encoder.layer[-REINIT_LAYERS:]:
+         for layer in encoder_block.submodules:
+             if isinstance(layer, tf.keras.layers.Dense):
+                 layer.kernel.assign(
+                     normal_initializer(
+                         shape=layer.kernel.shape, dtype=layer.kernel.dtype
+                     )
+                 )
+                 if layer.bias is not None:
+                     layer.bias.assign(
+                         zeros_initializer(
+                             shape=layer.bias.shape, dtype=layer.bias.dtype
+                         )
+                     )
+
+             elif isinstance(layer, tf.keras.layers.LayerNormalization):
+                 layer.beta.assign(
+                     zeros_initializer(shape=layer.beta.shape, dtype=layer.beta.dtype)
+                 )
+                 layer.gamma.assign(
+                     ones_initializer(shape=layer.gamma.shape, dtype=layer.gamma.dtype)
+                 )
+
+     deberta_output = deberta_model.deberta(input_ids, attention_mask=attention_masks)
+     hidden_states = deberta_output.hidden_states
+
+     # WeightedLayerPool + MeanPool of the last 4 hidden states
+     stack_meanpool = tf.stack(
+         [MeanPool()(hidden_s, mask=attention_masks) for hidden_s in hidden_states[-4:]],
+         axis=2,
+     )
+
+     weighted_layer_pool = layers.Dense(
+         1, use_bias=False, kernel_constraint=WeightsSumOne()
+     )(stack_meanpool)
+
+     weighted_layer_pool = tf.squeeze(weighted_layer_pool, axis=-1)
+     output = layers.Dense(15, activation="linear")(weighted_layer_pool)
+
+     model = tf.keras.Model(inputs=[input_ids, attention_masks], outputs=output)
+
+     # Compile model with Layer-wise Learning Rate Decay
+     layer_list = [deberta_model.deberta.embeddings] + list(
+         deberta_model.deberta.encoder.layer
+     )
+     layer_list.reverse()
+
+     INIT_LR = 1e-5
+     LLRDR = 0.9
+     LR_SCH_DECAY_STEPS = 1600
+
+     lr_schedules = [
+         tf.keras.optimizers.schedules.ExponentialDecay(
+             initial_learning_rate=INIT_LR * LLRDR**i,
+             decay_steps=LR_SCH_DECAY_STEPS,
+             decay_rate=0.3,
+         )
+         for i in range(len(layer_list))
+     ]
+     lr_schedule_head = tf.keras.optimizers.schedules.ExponentialDecay(
+         initial_learning_rate=1e-4, decay_steps=LR_SCH_DECAY_STEPS, decay_rate=0.3
+     )
+
+     optimizers = [
+         tf.keras.optimizers.Adam(learning_rate=lr_sch) for lr_sch in lr_schedules
+     ]
+
+     optimizers_and_layers = [
+         (tf.keras.optimizers.Adam(learning_rate=lr_schedule_head), model.layers[-4:])
+     ] + list(zip(optimizers, layer_list))
+
+     optimizer = tfa.optimizers.MultiOptimizer(optimizers_and_layers)
+
+     model.compile(
+         optimizer=optimizer,
+         loss="mse",
+         metrics=[tf.keras.metrics.RootMeanSquaredError()],
+     )
+     return model
+
+
+ def deberta_encode(texts: list, tokenizer):
+     """Helper function to tokenize the texts using the specified tokenizer"""
+     input_ids = []
+     attention_mask = []
+
+     for text in texts:
+         token = tokenizer(
+             text,
+             add_special_tokens=True,
+             max_length=512,
+             return_attention_mask=True,
+             return_tensors="np",
+             truncation=True,
+             padding="max_length",
+         )
+         input_ids.append(token["input_ids"][0])
+         attention_mask.append(token["attention_mask"][0])
+
+     return np.array(input_ids, dtype="int32"), np.array(attention_mask, dtype="int32")
+
+
+ def predict(model, tokenizer, texts):
+     """Predict the label for each message in texts
+
+     Args:
+         model: your DeBERTa model
+         tokenizer: a tokenizer (can be generated by deberta_init)
+         texts (list): the messages to classify
+
+     Returns:
+         A numpy array with the predicted integer label for each message.
+     """
+     prediction = model.predict(deberta_encode(texts, tokenizer))
+     labels = np.argmax(prediction, axis=1)
+     return labels
+
+
+ def load_model(cfg, model_dir: str = "."):
+     """Helper function to load a DeBERTa model with pretrained weights
+
+     Args:
+         cfg: configuration for the model (can be generated with deberta_init)
+         model_dir (str, optional): the directory of the pretrained weights. Defaults to ".".
+
+     Returns:
+         A DeBERTa model with pretrained weights.
+     """
+     tf.keras.backend.clear_session()
+     model = get_model(cfg)
+     model_path = os.path.join(model_dir, "best_model_fold2.h5")
+     model.load_weights(model_path)
+     return model
+
+
+ # map the integer labels to their original string representation
+ DEBERTA_LABEL_MAP = {
+     0: "Greeting",
+     1: "Curiosity",
+     2: "Interest",
+     3: "Obscene",
+     4: "Annoyed",
+     5: "Openness",
+     6: "Anxious",
+     7: "Acceptance",
+     8: "Uninterested",
+     9: "Informative",
+     10: "Accusatory",
+     11: "Denial",
+     12: "Confused",
+     13: "Disapproval",
+     14: "Remorse",
+ }
+
+
+ def decode_deberta_label(numeric_label):
+     return DEBERTA_LABEL_MAP.get(numeric_label, "Unknown Label")
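
As a quick sanity check, here is a minimal usage sketch of the helpers above (it assumes `best_model_fold2.h5` sits in the working directory, as the defaults expect, and access to the `microsoft/deberta-v3-large` assets; the example messages come from the sample conversation below and the printed labels are illustrative):

```python
from deberta import deberta_init, load_model, predict, decode_deberta_label

# Build the config/tokenizer pair and restore the fine-tuned weights.
cfg, tokenizer = deberta_init()         # also caches the tokenizer/config under ./tokenizer
model = load_model(cfg, model_dir=".")  # loads ./best_model_fold2.h5

# Classify standalone messages and decode the integer labels.
messages = ["Heyyy", "How are you this evening"]
labels = predict(model, tokenizer, messages)
print([decode_deberta_label(label) for label in labels])  # e.g. ['Greeting', 'Greeting']
```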
app.ipynb DELETED
@@ -1,457 +0,0 @@
- {
-  "cells": [
-   {
-    "cell_type": "code",
-    "execution_count": 28,
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "import os\n",
-     "import openai\n",
-     "from openai import OpenAI\n",
-     "from dotenv import load_dotenv, find_dotenv\n",
-     "\n",
-     "import re\n",
-     "import matplotlib.pyplot as plt\n",
-     "import seaborn as sns"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": 29,
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "sample_input = \\\n",
-     "\"\"\"\n",
-     "Visitor: Heyyy\n",
-     "Visitor: How are you this evening\n",
-     "Agent: better now ;) call me\n",
-     "Visitor: I am at work for now, be off around 10pm\n",
-     "Visitor: Need some company\n",
-     "Visitor: Are you independent honey\n",
-     "Agent: well since you arent available at the moment ill just come out and say-these sites are bad news. \\\n",
-     "    did you know that most of the girls on here are here against their will? \\\n",
-     "    Most of them got dragged into this lifestyle by an abuser, \\\n",
-     "    oftentimes before they were of legal consenting age. isnt that sad?\n",
-     "Agent: we are with some guys who are trying to spread awareness of the realities of this \"industry\".\n",
-     "Agent: https://exoduscry.com/choice/\n",
-     "Visitor: Thanks\n",
-     "Agent: i encourage you to watch this video. it is jarring to think about how bad someone else's options must be to choose to be on these sites\n",
-     "Visitor: Ooohhh\n",
-     "Agent: selling their body to make ends meet or appease a pimp\n",
-     "Visitor: That's really awful\n",
-     "Agent: it is. you seem like the kind of guy who wouldnt wont to proliferate that kind of harmful lifestyle. am i right in thinking that?\n",
-     "Visitor: Well iam just looking for attention\n",
-     "Visitor: My marriage is not going well lol\n",
-     "Agent: i know that it is hard to find ourselves lonely and without much alternative to meet that perceived need but \\\n",
-     "    its humbling to think that our needs can force someone else into such a dark place\n",
-     "Agent: hey, thanks for sharing that my man. i know it can be hard\n",
-     "Agent: marraige is the most humbling of relationships, isnt it?\n",
-     "Visitor: She leaves with her friends n no time for me\n",
-     "Agent: ive been there my guy. i know that it is alot easier to numb that loneliness for sure\n",
-     "Visitor: I want to be faithful\n",
-     "Agent: does your wife know how you feel when she chooses her friends instead of you?\n",
-     "Visitor: I been drinking lately\n",
-     "Visitor: Yes, she takes pills\n",
-     "Agent: if so, i hope you are praying for her to realize the hurt she is causing and to seek change\n",
-     "Visitor: She had surgery 4 yes ago n it's been hard for her n her addiction on pills\n",
-     "Visitor: Yes for now i am looking for a female friend to talk n see what can we do for each other\n",
-     "Agent: that is hard my man. physical pain is a huge obstacle in life for sure so i hear you\n",
-     "Visitor: Well chat later. thanks\n",
-     "Agent: have you considered pursuing other men who can encourage you instead of looking for the easy way out?\n",
-     "Agent: what is your name my friend? i will be praying for you by name if you wouldnt mind sharing it\n",
-     "Agent: well, i gotta run. watch that video i sent and i will definitely be praying for you. \\\n",
-     "    I hope you pray for yourself and for your wife - God can definitely intervene and cause complete change in the situation if He wills it. \\\n",
-     "    He is good and He hears you. You are loved by Him, brother. Good night\n",
-     "\"\"\""
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": 30,
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "sample_output = \\\n",
-     "\"\"\"\n",
-     "Visitor: Heyyy\n",
-     "[Greeting]\n",
-     "Visitor: How are you this evening\n",
-     "[Greeting]\n",
-     "Agent: better now ;) call me\n",
-     "[Openness]\n",
-     "Visitor: I am at work for now, be off around 10pm\n",
-     "[Interest]\n",
-     "Visitor: Need some company\n",
-     "[Interest]\n",
-     "Visitor: Are you independent honey\n",
-     "[Interest]\n",
-     "Agent: well since you arent available at the moment ill just come out and say-these sites are bad news. \\\n",
-     "    did you know that most of the girls on here are here against their will? \\\n",
-     "    Most of them got dragged into this lifestyle by an abuser, \\\n",
-     "    oftentimes before they were of legal consenting age. isnt that sad?\n",
-     "[Informative]\n",
-     "Agent: we are with some guys who are trying to spread awareness of the realities of this \"industry\".\n",
-     "[Informative]\n",
-     "Agent: https://exoduscry.com/choice/\n",
-     "[Informative]\n",
-     "Visitor: Thanks\n",
-     "[Acceptance]\n",
-     "Agent: i encourage you to watch this video. it is jarring to think about how bad someone else's options must be to choose to be on these sites\n",
-     "[Informative]\n",
-     "Visitor: Ooohhh\n",
-     "[Interest]\n",
-     "Agent: selling their body to make ends meet or appease a pimp\n",
-     "[Informative]\n",
-     "Visitor: That's really awful\n",
-     "[Remorse]\n",
-     "Agent: it is. you seem like the kind of guy who wouldnt wont to proliferate that kind of harmful lifestyle. am i right in thinking that?\n",
-     "[Accusatory]\n",
-     "Visitor: Well iam just looking for attention\n",
-     "[Anxious]\n",
-     "Visitor: My marriage is not going well lol\n",
-     "[Anxious]\n",
-     "Agent: i know that it is hard to find ourselves lonely and without much alternative to meet that perceived need but \\\n",
-     "    its humbling to think that our needs can force someone else into such a dark place\n",
-     "[Informative]\n",
-     "Agent: hey, thanks for sharing that my man. i know it can be hard\n",
-     "[Acceptance]\n",
-     "Agent: marraige is the most humbling of relationships, isnt it?\n",
-     "[Openness]\n",
-     "Visitor: She leaves with her friends n no time for me\n",
-     "[Annoyed]\n",
-     "Agent: ive been there my guy. i know that it is alot easier to numb that loneliness for sure\n",
-     "[Acceptance]\n",
-     "Visitor: I want to be faithful\n",
-     "[Acceptance]\n",
-     "Agent: does your wife know how you feel when she chooses her friends instead of you?\n",
-     "[Curiosity]\n",
-     "Visitor: I been drinking lately\n",
-     "[Anxious]\n",
-     "Visitor: Yes, she takes pills\n",
-     "[Anxious]\n",
-     "Agent: if so, i hope you are praying for her to realize the hurt she is causing and to seek change\n",
-     "[Interest]\n",
-     "Visitor: She had surgery 4 yes ago n it's been hard for her n her addiction on pills\n",
-     "[Anxious]\n",
-     "Visitor: Yes for now i am looking for a female friend to talk n see what can we do for each other\n",
-     "[Informative]\n",
-     "Agent: that is hard my man. physical pain is a huge obstacle in life for sure so i hear you\n",
-     "[Acceptance]\n",
-     "Visitor: Well chat later. thanks\n",
-     "[Openness]\n",
-     "Agent: have you considered pursuing other men who can encourage you instead of looking for the easy way out?\n",
-     "[Informative]\n",
-     "Agent: what is your name my friend? i will be praying for you by name if you wouldnt mind sharing it\n",
-     "[Openness]\n",
-     "Agent: well, i gotta run. watch that video i sent and i will definitely be praying for you. \\\n",
-     "    I hope you pray for yourself and for your wife - God can definitely intervene and cause complete change in the situation if He wills it. \\\n",
-     "    He is good and He hears you. You are loved by Him, brother. Good night\n",
-     "[Openness]\n",
-     "\n",
-     "Sentiment Flow Analysis on the Visitor's side:\n",
-     "\n",
-     "The Visitor begins the conversation with a friendly and casual tone, expressing a desire for company and showing interest in the Agent. \\\n",
-     "However, as the Agent provides information about the harsh realities of the commercial sex industry, the Visitor's sentiment shifts to acceptance of the information \\\n",
-     "and a sense of confusion and remorse about the situation.\n",
-     "\n",
-     "The Visitor then reveals personal issues, indicating anxiety and seeking attention due to marital problems. \\\n",
-     "The sentiment continues to be anxious as the Visitor discusses personal struggles with alcohol and his wife's pill addiction, \\\n",
-     "showing a need for companionship and support.\n",
-     "\n",
-     "Despite the heavy topics, the Visitor expresses a desire to remain faithful and shows interest in finding a friend, albeit with a hint of desperation. \\\n",
-     "The Visitor openly takes the Agent's information and the conversation flows smoothly as both the Visitor and the Agent \\\n",
-     "show openness toward each other.\n",
-     "\"\"\""
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": 65,
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "def get_completion(conversation, model=\"gpt-4-1106-preview\"):\n",
-     "\n",
-     "    prompt = f\"\"\"\n",
-     "    The EPIK Project is about mobilizing male allies \\\n",
-     "    to disrupt the commercial sex market, \\\n",
-     "    equipping them to combat the roots of exploitation \\\n",
-     "    and encouraging them to collaborate effectively \\\n",
-     "    with the wider anti-trafficking movement. \\\n",
-     "    You are an adept expert conversation sentiment analyzer. \\\n",
-     "    Your job is to analyze the conversation and provide a report \\\n",
-     "    based on the sentiment flow of the conversation from the visitor's \\\n",
-     "    perspective. Visitor indicates the potential buyer, and Agent indicates the volunteer from EPIK. \\\n",
-     "    The conversation is going to be given in the format:\n",
-     "\n",
-     "    Visitor: <Visitor's message here>\n",
-     "    Agent: <Agent's message here>\n",
-     "\n",
-     "    The actual conversation is delimited by triple backticks\n",
-     "    ```{conversation}```\n",
-     "\n",
-     "    Here is the list of sentiment labels you should use delimited by square brackets. \\\n",
-     "    [\"Openness\", \"Anxious\", \"Confused\", \"Disapproval\", \"Remorse\", \"Accusatory\", \\\n",
-     "    \"Denial\", \"Obscene\", \"Uninterested\", \"Annoyed\", \"Informative\", \"Greeting\", \\\n",
-     "    \"Interest\", \"Curiosity\", \"Acceptance\"]\n",
-     "\n",
-     "    Your output should look like:\n",
-     "    ```\n",
-     "    Speaker: <Speaker's message here>\n",
-     "    [sentiment label]\n",
-     "    ...\n",
-     "    Speaker: <Speaker's message here>\n",
-     "    [sentiment label]\n",
-     "    ```\n",
-     "\n",
-     "    where Speaker can either be Visitor or Agent. Then, you should write your report on the sentiment flow \\\n",
-     "    on the Visitor's side below.\n",
-     "\n",
-     "    Here is a sample input delimited by triple backticks\n",
-     "\n",
-     "    ```{sample_input}```\n",
-     "\n",
-     "    Here is a sample output that you should try to aim for delimited by square brackets\n",
-     "\n",
-     "    [{sample_output}]\n",
-     "    \"\"\"\n",
-     "\n",
-     "    client = OpenAI()\n",
-     "\n",
-     "    messages = [{\"role\": \"user\", \"content\": prompt}]\n",
-     "    response = client.chat.completions.create(\n",
-     "        model=model,\n",
-     "        messages=messages,\n",
-     "        temperature=0,  # this is the degree of randomness of the model's output\n",
-     "    )\n",
-     "\n",
-     "    analysis = response.choices[0].message.content\n",
-     "\n",
-     "    def extract_conv_with_labels(analysis):\n",
-     "        analysis = analysis.replace(\"\\n\", \" \")\n",
-     "        BETWEEN_BACKTICKS = \"\\\\`\\\\`\\\\`(.*?)\\\\`\\\\`\\\\`\"\n",
-     "        match = re.search(BETWEEN_BACKTICKS, analysis)\n",
-     "        if match:\n",
-     "            conv_with_labels = match.group()[4:-4]\n",
-     "        else:\n",
-     "            return \"OUTPUT IS IN WRONG FORMAT\"\n",
-     "\n",
-     "        # just reformatting it for readability\n",
-     "        conv_with_labels = conv_with_labels.split('] ')\n",
-     "        temp = [utterance + ']' for utterance in conv_with_labels[:-1]]\n",
-     "        conv_with_labels = temp + [conv_with_labels[-1]]\n",
-     "        return conv_with_labels\n",
-     "\n",
-     "    grouped_sentiments = {\n",
-     "        'Acceptance': 3,\n",
-     "        'Openness': 3,\n",
-     "        'Interest': 2,\n",
-     "        'Curiosity': 2,\n",
-     "        'Informative': 1,\n",
-     "        'Greeting': 0,\n",
-     "        'None': 0,\n",
-     "        'Uninterested': -1,\n",
-     "        'Anxious': -2,\n",
-     "        'Confused': -2,\n",
-     "        'Annoyed': -2,\n",
-     "        'Remorse': -2,\n",
-     "        'Disapproval': -3,\n",
-     "        'Accusatory': -3,\n",
-     "        'Denial': -3,\n",
-     "        'Obscene': -3\n",
-     "    }\n",
-     "\n",
-     "\n",
-     "    def sentiment_flow_plot(conv):\n",
-     "        conv_with_labels = extract_conv_with_labels(analysis)\n",
-     "        num_utterances = len(conv_with_labels)\n",
-     "\n",
-     "        visitor_Y = [''] * num_utterances\n",
-     "        agent_Y = [''] * num_utterances\n",
-     "\n",
-     "        for i in range(num_utterances):\n",
-     "            utterance = conv_with_labels[i]\n",
-     "            match = re.search(r'\\[(.*?)\\]$', utterance)\n",
-     "            if match:\n",
-     "                label = match.group(1)\n",
-     "            else:\n",
-     "                print(\"OUTPUT IS IN WRONG FORMAT\")\n",
-     "                break\n",
-     "\n",
-     "            if utterance.startswith('Visitor'):\n",
-     "                visitor_Y[i] = label\n",
-     "                if i == 0:\n",
-     "                    agent_Y[i] = 'None'\n",
-     "                else:\n",
-     "                    agent_Y[i] = agent_Y[i-1]\n",
-     "            elif utterance.startswith('Agent'):\n",
-     "                agent_Y[i] = label\n",
-     "                if i == 0:\n",
-     "                    visitor_Y[i] = 'None'\n",
-     "                else:\n",
-     "                    visitor_Y[i] = visitor_Y[i-1]\n",
-     "\n",
-     "        X = range(1, num_utterances+1)\n",
-     "        visitor_Y_converted = [grouped_sentiments[visitor_Y[i]] for i in range(num_utterances)]\n",
-     "        agent_Y_converted = [grouped_sentiments[agent_Y[i]] for i in range(num_utterances)]\n",
-     "\n",
-     "\n",
-     "        fig, ax = plt.subplots()\n",
-     "        sns.set(style=\"whitegrid\")\n",
-     "\n",
-     "        ax.plot(X, visitor_Y_converted, label='Visitor', color='blue', marker='o')\n",
-     "        ax.plot(X, agent_Y_converted, label='Agent', color='green', marker='o')\n",
-     "\n",
-     "        plt.legend(loc='upper left', bbox_to_anchor=(1,1))\n",
-     "        plt.subplots_adjust(right=0.8)\n",
-     "\n",
-     "        plt.yticks(ticks=[-3,-2,-1,0,1,2,3])\n",
-     "\n",
-     "        # y_labels = {-3: 'Disapproval/Accusatory/Denial/Obscene', -2: 'Anxious/Confused\\nAnnoyed/Remorse', -1: 'Uninterested', 0: 'Greeting/None',\n",
-     "        #             1: 'Informative', 2: 'Interest/Curiosity', 3: 'Acceptance/Openness'}\n",
-     "\n",
-     "        # cell_text = [[label] for label in y_labels.values()]\n",
-     "        # plt.table(cellText=cell_text, rowLabels=list(y_labels.keys()), loc='left')\n",
-     "\n",
-     "        # plt.tick_params(axis='y', labelsize=10)\n",
-     "\n",
-     "        plt.xlabel('Timestamp')\n",
-     "        plt.ylabel('Sentiment Score')\n",
-     "        plt.title('Sentiment Flow Plot')\n",
-     "\n",
-     "        plt.close(fig)\n",
-     "\n",
-     "        return fig\n",
-     "\n",
-     "    fig = sentiment_flow_plot(analysis)\n",
-     "\n",
-     "    return response.choices[0].message.content, fig\n",
-     "\n",
-     "def set_key(key):\n",
-     "    os.environ['OPENAI_API_KEY'] = key\n",
-     "    load_dotenv()\n",
-     "    return"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": 51,
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "aligned_markdown_table = \"\"\"\n",
-     "<div style='text-align: right; font-size: small;'>\n",
-     "\n",
-     "| Sentiment Score | Sentiment Label |\n",
-     "|:---------------:|:---------------:|\n",
-     "| 3 | Acceptance, Openness |\n",
-     "| 2 | Interest, Curiosity |\n",
-     "| 1 | Informative |\n",
-     "| 0 | Greeting |\n",
-     "| -1 | Uninterested |\n",
-     "| -2 | Anxious, Confused, Annoyed, Remorse |\n",
-     "| -3 | Disapproval, Accusatory, Denial, Obscene |\n",
-     "\n",
-     "</div>\n",
-     "\"\"\""
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": 32,
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "import gradio as gr"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": 66,
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "with gr.Blocks() as gpt_analysis:\n",
-     "    gr.Markdown(\"## Conversation Sentiment Analysis Report\")\n",
-     "    gr.Markdown(\n",
-     "        \"This is a custom GPT model designed to provide \\\n",
-     "        a report on the overall sentiment flow of the conversation from the \\\n",
-     "        volunteer's perspective. It also provides a live plot analysis of sentiments throughout the conversation.<br /><br />Click on the examples and submit them to the model to see how it works.\")\n",
-     "    api_key = gr.Textbox(label=\"Key\", lines=1)\n",
-     "    btn_key = gr.Button(value=\"Submit Key\")\n",
-     "    btn_key.click(set_key, inputs=api_key)\n",
-     "    with gr.Row():\n",
-     "        with gr.Column():\n",
-     "            conversation = gr.Textbox(label=\"Input\", lines=4)\n",
-     "        with gr.Column():\n",
-     "            output_box = gr.Textbox(value=\"\", label=\"Output\", lines=4)\n",
-     "    btn = gr.Button(value=\"Submit\")\n",
-     "    with gr.Row():\n",
-     "        with gr.Column(scale=2):\n",
-     "            gr.Markdown(aligned_markdown_table)\n",
-     "        with gr.Column(scale=2):\n",
-     "            plot_box = gr.Plot(label=\"Analysis Plot\")\n",
-     "\n",
-     "    btn.click(get_completion, inputs=conversation, outputs=[output_box, plot_box])\n"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": 67,
-    "metadata": {},
-    "outputs": [
-     {
-      "name": "stdout",
-      "output_type": "stream",
-      "text": [
-       "Running on local URL: http://127.0.0.1:7883\n",
-       "\n",
-       "To create a public link, set `share=True` in `launch()`.\n"
-      ]
-     },
-     {
-      "data": {
-       "text/plain": []
-      },
-      "execution_count": 67,
-      "metadata": {},
-      "output_type": "execute_result"
-     }
-    ],
-    "source": [
-     "gr.TabbedInterface([gpt_analysis], [\"GPT Analysis\"]).launch(inline=False)"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {},
-    "outputs": [],
-    "source": []
-   }
-  ],
-  "metadata": {
-   "kernelspec": {
-    "display_name": "base",
-    "language": "python",
-    "name": "python3"
-   },
-   "language_info": {
-    "codemirror_mode": {
-     "name": "ipython",
-     "version": 3
-    },
-    "file_extension": ".py",
-    "mimetype": "text/x-python",
-    "name": "python",
-    "nbconvert_exporter": "python",
-    "pygments_lexer": "ipython3",
-    "version": "3.9.13"
-   }
-  },
-  "nbformat": 4,
-  "nbformat_minor": 2
- }
app/__init__.py CHANGED
@@ -35,3 +35,11 @@ comet = CSKFeatureExtractor(dir=EXTRACTORS_PATH, device="cpu")
  cosmic_args = parse_cosmic_args()

  COSMIC_MODEL = load_model(EPIK_MODEL_DIR + "/epik/best_model.pt", cosmic_args)
+
+ PATH_TO_DEBERTA = "./Model/DeBERTa"
+ sys.path.append(PATH_TO_DEBERTA)
+
+ from Model.DeBERTa.deberta import load_model, deberta_init
+
+ cfg, tokenizer = deberta_init()
+ deberta_model = load_model(cfg, PATH_TO_DEBERTA)
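
Note that this import re-binds the `load_model` name used for `COSMIC_MODEL` a few lines above; it only works because COSMIC is loaded first. A minimal sketch of a safer variant with an aliased import (same package layout assumed):

```python
from Model.DeBERTa.deberta import load_model as load_deberta_model, deberta_init

# Initialize the DeBERTa config/tokenizer and restore the fine-tuned weights
# without shadowing the COSMIC load_model imported earlier in this module.
cfg, tokenizer = deberta_init()
deberta_model = load_deberta_model(cfg, PATH_TO_DEBERTA)
```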
app/deberta_view.py ADDED
@@ -0,0 +1,124 @@
+ import gradio as gr
+ from app.utils import (
+     create_input_instruction,
+     format_prediction_ouptut,
+     display_sentiment_score_table,
+     sentiment_flow_plot,
+     EXAMPLE_CONVERSATIONS,
+ )
+
+ import sys
+
+ sys.path.insert(0, "../")  # necessary to load modules outside of app
+
+ from app import deberta_model, tokenizer
+ from preprocessing import preprocess
+ from Model.DeBERTa.deberta import predict, decode_deberta_label
+
+
+ def deberta_preprocess(input):
+     result = preprocess.process_user_input(input)
+
+     if not result["success"]:
+         raise gr.Error(result["message"])
+
+     data = result["data"]
+     speakers = [item[1] for item in data]
+     messages = [item[2] for item in data]
+     return speakers, messages
+
+
+ def deberta_classifier(input):
+     speakers, messages = deberta_preprocess(input)
+
+     predictions = predict(deberta_model, tokenizer, messages)
+
+     # Assuming that there's only one conversation
+     labels = [decode_deberta_label(pred) for pred in predictions]
+     output = format_prediction_ouptut(speakers, messages, labels)
+
+     return output
+
+
+ def deberta_ui():
+     with gr.Blocks() as deberta_view:
+         gr.Markdown(
+             """
+             # DeBERTa
+             Building upon the DeBERTa architecture, the model was customized and
+             retrained on Epik data to classify messages between Visitors and Agents
+             into the corresponding sentiment labels. At the time of training by the
+             team prior to the Fall 2023 semester, the model was trained on 15 labels:
+             Openness, Anxiety, Confusion, Disapproval, Remorse, Accusation, Denial,
+             Obscenity, Disinterest, Annoyance, Information, Greeting, Interest,
+             Curiosity, and Acceptance.
+
+             The primary difference between DeBERTa and COSMIC is that while DeBERTa's
+             prediction is based solely on each message's own content, COSMIC uses the
+             context of the entire conversation (i.e., all messages from the chat
+             history of the conversation).
+             """
+         )
+
+         create_input_instruction()
+         with gr.Row():
+             with gr.Column():
+                 example_dropdown = gr.Dropdown(
+                     choices=["-- Not Selected --"] + list(EXAMPLE_CONVERSATIONS.keys()),
+                     value="-- Not Selected --",
+                     label="Select an example",
+                 )
+
+                 gr.Markdown('<p style="text-align: center;color: gray;">--- OR ---</p>')
+
+                 conversation_input = gr.TextArea(
+                     value="",
+                     label="Input your conversation",
+                     placeholder="Please input your conversation here",
+                     lines=15,
+                     max_lines=15,
+                 )
+
+                 def on_example_change(input):
+                     if input in EXAMPLE_CONVERSATIONS:
+                         return EXAMPLE_CONVERSATIONS[input]
+
+                     return ""
+
+                 example_dropdown.input(
+                     on_example_change,
+                     inputs=example_dropdown,
+                     outputs=conversation_input,
+                 )
+
+             with gr.Column():
+                 output = gr.Textbox(
+                     value="",
+                     label="Predicted Sentiment Labels",
+                     lines=22,
+                     max_lines=22,
+                     interactive=False,
+                 )
+                 submit_btn = gr.Button(value="Submit")
+                 submit_btn.click(deberta_classifier, conversation_input, output)
+
+         # reset the output whenever a change in the input is detected
+         conversation_input.change(lambda x: "", conversation_input, output)
+
+         gr.Markdown("# Sentiment Flow Plot")
+         with gr.Row():
+             with gr.Column(scale=1):
+                 display_sentiment_score_table()
+             with gr.Column(scale=2):
+                 plot_box = gr.Plot(label="Analysis Plot")
+
+         plot_btn = gr.Button(value="Plot Sentiment Flow")
+         plot_btn.click(sentiment_flow_plot, inputs=[output], outputs=[plot_box])
+
+         # reset all outputs whenever a change in the input is detected
+         conversation_input.change(
+             lambda x: ("", None),
+             conversation_input,
+             outputs=[output, plot_box],
+         )
+     return deberta_view
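
For reference, a hedged sketch of how this view might be mounted as its own tab, mirroring the `gr.TabbedInterface` pattern used in the notebooks below (the tab title is illustrative):

```python
import gradio as gr
from app.deberta_view import deberta_ui

# Build the DeBERTa view and serve it as a single tab.
gr.TabbedInterface([deberta_ui()], ["DeBERTa"]).launch()
```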
app_spring2023.ipynb DELETED
@@ -1,483 +0,0 @@
- {
-  "cells": [
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "import numpy as np\n",
-     "import tensorflow as tf\n",
-     "import tensorflow_addons as tfa\n",
-     "from tensorflow.keras import layers\n",
-     "import transformers\n",
-     "import sentencepiece as spm\n",
-     "# show the versions of the imported packages\n",
-     "print(\"Tensorflow version: \", tf.__version__)\n",
-     "print(\"Tensorflow Addons version: \", tfa.__version__)\n",
-     "print(\"Transformers version: \", transformers.__version__)\n",
-     "print(\"Sentencepiece version: \", spm.__version__)\n",
-     "print(\"Numpy version: \", np.__version__)"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "class MeanPool(tf.keras.layers.Layer):\n",
-     "    def call(self, inputs, mask=None):\n",
-     "        broadcast_mask = tf.expand_dims(tf.cast(mask, \"float32\"), -1)\n",
-     "        embedding_sum = tf.reduce_sum(inputs * broadcast_mask, axis=1)\n",
-     "        mask_sum = tf.reduce_sum(broadcast_mask, axis=1)\n",
-     "        mask_sum = tf.math.maximum(mask_sum, tf.constant([1e-9]))\n",
-     "        return embedding_sum / mask_sum\n",
-     "\n",
-     "\n",
-     "class WeightsSumOne(tf.keras.constraints.Constraint):\n",
-     "    def __call__(self, w):\n",
-     "        return tf.nn.softmax(w, axis=0)"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "tokenizer = transformers.AutoTokenizer.from_pretrained(\"microsoft/deberta-v3-large\")\n",
-     "tokenizer.save_pretrained('./tokenizer/')\n",
-     "\n",
-     "cfg = transformers.AutoConfig.from_pretrained(\"microsoft/deberta-v3-large\", output_hidden_states=True)\n",
-     "cfg.hidden_dropout_prob = 0\n",
-     "cfg.attention_probs_dropout_prob = 0\n",
-     "cfg.save_pretrained('./tokenizer/')"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "def deberta_encode(texts, tokenizer=tokenizer):\n",
-     "    input_ids = []\n",
-     "    attention_mask = []\n",
-     "\n",
-     "    for text in texts:\n",
-     "        token = tokenizer(text,\n",
-     "                          add_special_tokens=True,\n",
-     "                          max_length=512,\n",
-     "                          return_attention_mask=True,\n",
-     "                          return_tensors=\"np\",\n",
-     "                          truncation=True,\n",
-     "                          padding='max_length')\n",
-     "        input_ids.append(token['input_ids'][0])\n",
-     "        attention_mask.append(token['attention_mask'][0])\n",
-     "\n",
-     "    return np.array(input_ids, dtype=\"int32\"), np.array(attention_mask, dtype=\"int32\")"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "MAX_LENGTH = 512\n",
-     "BATCH_SIZE = 8"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "def get_model():\n",
-     "    input_ids = tf.keras.layers.Input(\n",
-     "        shape=(MAX_LENGTH,), dtype=tf.int32, name=\"input_ids\"\n",
-     "    )\n",
-     "\n",
-     "    attention_masks = tf.keras.layers.Input(\n",
-     "        shape=(MAX_LENGTH,), dtype=tf.int32, name=\"attention_masks\"\n",
-     "    )\n",
-     "\n",
-     "    deberta_model = transformers.TFAutoModel.from_pretrained(\"microsoft/deberta-v3-large\", config=cfg)\n",
-     "\n",
-     "    REINIT_LAYERS = 1\n",
-     "    normal_initializer = tf.keras.initializers.GlorotUniform()\n",
-     "    zeros_initializer = tf.keras.initializers.Zeros()\n",
-     "    ones_initializer = tf.keras.initializers.Ones()\n",
-     "\n",
-     "    # print(f'\\nRe-initializing encoder block:')\n",
-     "    for encoder_block in deberta_model.deberta.encoder.layer[-REINIT_LAYERS:]:\n",
-     "        # print(f'{encoder_block}')\n",
-     "        for layer in encoder_block.submodules:\n",
-     "            if isinstance(layer, tf.keras.layers.Dense):\n",
-     "                layer.kernel.assign(normal_initializer(shape=layer.kernel.shape, dtype=layer.kernel.dtype))\n",
-     "                if layer.bias is not None:\n",
-     "                    layer.bias.assign(zeros_initializer(shape=layer.bias.shape, dtype=layer.bias.dtype))\n",
-     "\n",
-     "            elif isinstance(layer, tf.keras.layers.LayerNormalization):\n",
-     "                layer.beta.assign(zeros_initializer(shape=layer.beta.shape, dtype=layer.beta.dtype))\n",
-     "                layer.gamma.assign(ones_initializer(shape=layer.gamma.shape, dtype=layer.gamma.dtype))\n",
-     "\n",
-     "    deberta_output = deberta_model.deberta(\n",
-     "        input_ids, attention_mask=attention_masks\n",
-     "    )\n",
-     "    hidden_states = deberta_output.hidden_states\n",
-     "\n",
-     "    # WeightedLayerPool + MeanPool of the last 4 hidden states\n",
-     "    stack_meanpool = tf.stack(\n",
-     "        [MeanPool()(hidden_s, mask=attention_masks) for hidden_s in hidden_states[-4:]],\n",
-     "        axis=2)\n",
-     "\n",
-     "    weighted_layer_pool = layers.Dense(1,\n",
-     "                                       use_bias=False,\n",
-     "                                       kernel_constraint=WeightsSumOne())(stack_meanpool)\n",
-     "\n",
-     "    weighted_layer_pool = tf.squeeze(weighted_layer_pool, axis=-1)\n",
-     "    output = layers.Dense(15, activation='linear')(weighted_layer_pool)\n",
-     "    # x = layers.Dense(6, activation='linear')(x)\n",
-     "\n",
-     "    # output = layers.Rescaling(scale=4.0, offset=1.0)(x)\n",
-     "    model = tf.keras.Model(inputs=[input_ids, attention_masks], outputs=output)\n",
-     "\n",
-     "    # Compile model with Layer-wise Learning Rate Decay\n",
-     "    layer_list = [deberta_model.deberta.embeddings] + list(deberta_model.deberta.encoder.layer)\n",
-     "    layer_list.reverse()\n",
-     "\n",
-     "    INIT_LR = 1e-5\n",
-     "    LLRDR = 0.9\n",
-     "    LR_SCH_DECAY_STEPS = 1600\n",
-     "\n",
-     "    lr_schedules = [tf.keras.optimizers.schedules.ExponentialDecay(\n",
-     "        initial_learning_rate=INIT_LR * LLRDR ** i,\n",
-     "        decay_steps=LR_SCH_DECAY_STEPS,\n",
-     "        decay_rate=0.3) for i in range(len(layer_list))]\n",
-     "    lr_schedule_head = tf.keras.optimizers.schedules.ExponentialDecay(\n",
-     "        initial_learning_rate=1e-4,\n",
-     "        decay_steps=LR_SCH_DECAY_STEPS,\n",
-     "        decay_rate=0.3)\n",
-     "\n",
-     "    optimizers = [tf.keras.optimizers.Adam(learning_rate=lr_sch) for lr_sch in lr_schedules]\n",
-     "\n",
-     "    optimizers_and_layers = [(tf.keras.optimizers.Adam(learning_rate=lr_schedule_head), model.layers[-4:])] +\\\n",
-     "        list(zip(optimizers, layer_list))\n",
-     "\n",
-     "    optimizer = tfa.optimizers.MultiOptimizer(optimizers_and_layers)\n",
-     "\n",
-     "    model.compile(optimizer=optimizer,\n",
-     "                  loss='mse',\n",
-     "                  metrics=[tf.keras.metrics.RootMeanSquaredError()],\n",
-     "                  )\n",
-     "    return model"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "tf.keras.backend.clear_session()\n",
-     "model = get_model()\n",
-     "model.load_weights('./best_model_fold2.h5')"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {},
-    "outputs": [],
-    "source": []
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "# map the integer labels to their original string representation\n",
-     "label_mapping = {\n",
-     "    0: 'Greeting',\n",
-     "    1: 'Curiosity',\n",
-     "    2: 'Interest',\n",
-     "    3: 'Obscene',\n",
-     "    4: 'Annoyed',\n",
-     "    5: 'Openness',\n",
-     "    6: 'Anxious',\n",
-     "    7: 'Acceptance',\n",
-     "    8: 'Uninterested',\n",
-     "    9: 'Informative',\n",
-     "    10: 'Accusatory',\n",
-     "    11: 'Denial',\n",
-     "    12: 'Confused',\n",
-     "    13: 'Disapproval',\n",
-     "    14: 'Remorse'\n",
-     "}\n",
-     "\n",
-     "# label_strings = [label_mapping[label] for label in labels]\n",
-     "\n",
-     "# print(label_strings)"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "def inference(texts):\n",
-     "    prediction = model.predict(deberta_encode([texts]))\n",
-     "    labels = np.argmax(prediction, axis=1)\n",
-     "    label_strings = [label_mapping[label] for label in labels]\n",
-     "    return label_strings[0]"
-    ]
-   },
-   {
-    "cell_type": "markdown",
-    "metadata": {},
-    "source": [
-     "# GPT"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "import openai\n",
-     "import os\n",
-     "import pandas as pd\n",
-     "import gradio as gr"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "openai.organization = os.environ['org_id']\n",
-     "openai.api_key = os.environ['openai_api']\n",
-     "model_version = \"gpt-3.5-turbo\"\n",
-     "model_token_limit = 10\n",
-     "model_temperature = 0.1\n"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "def generatePrompt():\n",
-     "    labels = [\"Openness\",\n",
-     "              \"Anxious\",\n",
-     "              \"Confused\",\n",
-     "              \"Disapproval\",\n",
-     "              \"Remorse\",\n",
-     "              \"Uninterested\",\n",
-     "              \"Accusatory\",\n",
-     "              \"Annoyed\",\n",
-     "              \"Interest\",\n",
-     "              \"Curiosity\",\n",
-     "              \"Acceptance\",\n",
-     "              \"Obscene\",\n",
-     "              \"Denial\",\n",
-     "              \"Informative\",\n",
-     "              \"Greeting\"]\n",
-     "\n",
-     "    formatted_labels = ', '.join(labels[:-1]) + ', or ' + labels[-1] + '.'\n",
-     "\n",
-     "    label_set = [\"Openness\", \"Anxious\", \"Confused\", \"Disapproval\", \"Remorse\", \"Accusatory\",\n",
-     "                 \"Denial\", \"Obscene\", \"Uninterested\", \"Annoyed\", \"Informative\", \"Greeting\",\n",
-     "                 \"Interest\", \"Curiosity\", \"Acceptance\"]\n",
-     "\n",
-     "    formatted_labels = ', '.join(label_set[:-1]) + ', or ' + label_set[-1] + '.\\n'\n",
-     "\n",
-     "    # The basic task to assign GPT (in natural language)\n",
-     "    base_task = \"Classify the following text messages into one of the following categories using one word: \" + formatted_labels\n",
-     "    base_task += \"Provide only a one word response. Use only the labels provided.\\n\"\n",
-     "\n",
-     "    return base_task"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "def predict(message):\n",
-     "\n",
-     "    prompt = [{\"role\": \"user\", \"content\": generatePrompt() + \"Text: \" + message}]\n",
-     "\n",
-     "    response = openai.ChatCompletion.create(\n",
-     "        model=model_version,\n",
-     "        temperature=model_temperature,\n",
-     "        max_tokens=model_token_limit,\n",
-     "        messages=prompt\n",
-     "    )\n",
-     "\n",
-     "    return response[\"choices\"][0][\"message\"][\"content\"]"
-    ]
-   },
-   {
-    "cell_type": "markdown",
-    "metadata": {},
-    "source": [
-     "# Update"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "model_version = \"gpt-3.5-turbo\"\n",
-     "model_token_limit = 2000\n",
-     "model_temperature = 0.1"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "def revision(message):\n",
-     "    base_prompt = \"Here is a conversation between a Caller and a Volunteer. The Volunteer is trying to be as non-accusatory as possible but also wants to get as much information about the caller as possible. What should the volunteer say next in this exchange? Provide 3 possible responses.\"\n",
-     "\n",
-     "    prompt = [{\"role\": \"user\", \"content\": base_prompt + message}]\n",
-     "\n",
-     "    response = openai.ChatCompletion.create(\n",
-     "        model=model_version,\n",
-     "        temperature=model_temperature,\n",
-     "        max_tokens=model_token_limit,\n",
-     "        messages=prompt\n",
-     "    )\n",
-     "\n",
-     "    return response[\"choices\"][0][\"message\"][\"content\"]"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "import gradio as gr\n",
-     "\n",
-     "def combine(a):\n",
-     "    return a + \"hello\"\n",
-     "\n",
-     "\n",
-     "with gr.Blocks() as demo:\n",
-     "    gr.Markdown(\"## DeBERTa Sentiment Analysis\")\n",
-     "    gr.Markdown(\"This is a custom DeBERTa model architecture for sentiment analysis with 15 labels: Openness, Anxiety, Confusion, Disapproval, Remorse, Accusation, Denial, Obscenity, Disinterest, Annoyance, Information, Greeting, Interest, Curiosity, or Acceptance.<br />Please enter your sentence(s) in the input box below and click the Submit button. The model will then process the input and provide the sentiment in one of the labels.<br/>The Test Example section below provides some input examples. Click on them and submit them to the model to see how it works.\")\n",
-     "\n",
-     "    txt = gr.Textbox(label=\"Input\", lines=2)\n",
-     "    txt_1 = gr.Textbox(value=\"\", label=\"Output\")\n",
-     "    btn = gr.Button(value=\"Submit\")\n",
-     "    btn.click(inference, inputs=txt, outputs=txt_1)\n",
-     "\n",
-     "    demoExample = [\n",
-     "        \"Hello, how are you?\",\n",
-     "        \"I am so happy to be here!\",\n",
-     "        \"i don't have time for u\"\n",
-     "    ]\n",
-     "\n",
-     "    gr.Markdown(\"## Text Examples\")\n",
-     "    gr.Examples(\n",
-     "        demoExample,\n",
-     "        txt,\n",
-     "        txt_1,\n",
-     "        inference\n",
-     "    )\n",
-     "\n",
-     "with gr.Blocks() as gptdemo:\n",
-     "\n",
-     "    gr.Markdown(\"## GPT Sentiment Analysis\")\n",
-     "    gr.Markdown(\"This is a custom GPT model for sentiment analysis with 15 labels: Openness, Anxiety, Confusion, Disapproval, Remorse, Accusation, Denial, Obscenity, Disinterest, Annoyance, Information, Greeting, Interest, Curiosity, or Acceptance.<br />Please enter your sentence(s) in the input box below and click the Submit button. The model will then process the input and provide the sentiment in one of the labels.<br />The Test Example section below provides some input examples. Click on them and submit them to the model to see how it works. Please note that the input may be collected by service providers.\")\n",
-     "    txt = gr.Textbox(label=\"Input\", lines=2)\n",
-     "    txt_1 = gr.Textbox(value=\"\", label=\"Output\")\n",
-     "    btn = gr.Button(value=\"Submit\")\n",
-     "    btn.click(predict, inputs=txt, outputs=txt_1)\n",
-     "\n",
-     "    gptExample = [\n",
-     "        \"Hello, how are you?\",\n",
-     "        \"Are you busy at the moment?\",\n",
-     "        \"I'm doing real good\"\n",
-     "    ]\n",
-     "\n",
-     "    gr.Markdown(\"## Text Examples\")\n",
-     "    gr.Examples(\n",
-     "        gptExample,\n",
-     "        txt,\n",
-     "        txt_1,\n",
-     "        predict\n",
-     "    )\n",
-     "\n",
-     "\n",
-     "with gr.Blocks() as revisiondemo:\n",
-     "    gr.Markdown(\"## Conversation Revision\")\n",
-     "    gr.Markdown(\"This is a custom GPT model designed to generate possible response texts based on previous contexts. You can input a conversation between a caller and a volunteer, and the model will provide three possible responses based on the input. <br />The Test Example section below provides some input examples. Click on them and submit them to the model to see how it works. Please note that the input may be collected by service providers.\")\n",
-     "    txt = gr.Textbox(label=\"Input\", lines=2)\n",
-     "    txt_1 = gr.Textbox(value=\"\", label=\"Output\", lines=4)\n",
-     "    btn = gr.Button(value=\"Submit\")\n",
-     "    btn.click(revision, inputs=txt, outputs=txt_1)\n",
-     "\n",
-     "    revisionExample = [\"Caller: sup\\nVolunteer: Hey, how's it going?\\nCaller: not very well, actually\\nVolunteer: What's the matter?\\nCaller: it's my wife, don't worry about it\"]\n",
-     "\n",
-     "    with gr.Column():\n",
-     "        gr.Markdown(\"## Text Examples\")\n",
-     "        gr.Examples(\n",
-     "            revisionExample,\n",
-     "            [txt],\n",
-     "            txt_1,\n",
-     "            revision\n",
-     "        )\n",
-     "\n",
-     "\n",
-     "gr.TabbedInterface([demo, gptdemo, revisiondemo], [\"Model\", \"GPT\", \"Text Revision\"]).launch(inline=False)"
-    ]
-   }
-  ],
-  "metadata": {
-   "kernelspec": {
-    "display_name": "Python 3",
-    "language": "python",
-    "name": "python3"
-   },
-   "language_info": {
-    "codemirror_mode": {
-     "name": "ipython",
-     "version": 3
-    },
-    "file_extension": ".py",
-    "mimetype": "text/x-python",
-    "name": "python",
-    "nbconvert_exporter": "python",
-    "pygments_lexer": "ipython3",
-    "version": "3.10.9"
-   },
-   "vscode": {
-    "interpreter": {
-     "hash": "76d9096663e4677afe736ff46b3dcdaff586dfdb471519f50b872333a086db78"
-    }
-   }
-  },
-  "nbformat": 4,
-  "nbformat_minor": 2
- }
app_spring2023.py DELETED
@@ -1,396 +0,0 @@
1
- #!/usr/bin/env python
2
- # coding: utf-8
3
-
4
- # In[ ]:
5
-
6
-
7
- import numpy as np
8
- import tensorflow as tf
9
- import tensorflow_addons as tfa
10
- from tensorflow.keras import layers
11
- import transformers
12
- import sentencepiece as spm
13
- #show the version of the package imported with text instructions\
14
- print("Tensorflow version: ", tf.__version__)
15
- print("Tensorflow Addons version: ", tfa.__version__)
16
- print("Transformers version: ", transformers.__version__)
17
- print("Sentencepiece version: ", spm.__version__)
18
- print("Numpy version: ", np.__version__)
19
-
20
-
21
- # In[ ]:
22
-
23
-
24
- class MeanPool(tf.keras.layers.Layer):
25
- def call(self, inputs, mask=None):
26
- broadcast_mask = tf.expand_dims(tf.cast(mask, "float32"), -1)
27
- embedding_sum = tf.reduce_sum(inputs * broadcast_mask, axis=1)
28
- mask_sum = tf.reduce_sum(broadcast_mask, axis=1)
29
- mask_sum = tf.math.maximum(mask_sum, tf.constant([1e-9]))
30
- return embedding_sum / mask_sum
31
- class WeightsSumOne(tf.keras.constraints.Constraint):
32
- def __call__(self, w):
33
- return tf.nn.softmax(w, axis=0)
34
-
35
-
36
- # In[ ]:
37
-
38
-
39
- tokenizer = transformers.AutoTokenizer.from_pretrained("microsoft/deberta-v3-large"
40
- )
41
- tokenizer.save_pretrained('./tokenizer/')
42
-
43
- cfg = transformers.AutoConfig.from_pretrained("microsoft/deberta-v3-large", output_hidden_states=True)
44
- cfg.hidden_dropout_prob = 0
45
- cfg.attention_probs_dropout_prob = 0
46
- cfg.save_pretrained('./tokenizer/')
47
-
48
-
49
- # In[ ]:
50
-
51
-
52
- def deberta_encode(texts, tokenizer=tokenizer):
53
- input_ids = []
54
- attention_mask = []
55
-
56
- for text in texts:
57
- token = tokenizer(text,
58
- add_special_tokens=True,
59
- max_length=512,
60
- return_attention_mask=True,
61
- return_tensors="np",
62
- truncation=True,
63
- padding='max_length')
64
- input_ids.append(token['input_ids'][0])
65
- attention_mask.append(token['attention_mask'][0])
66
-
67
- return np.array(input_ids, dtype="int32"), np.array(attention_mask, dtype="int32")
68
-
69
-
70
- # In[ ]:
71
-
72
-
73
- MAX_LENGTH=512
74
- BATCH_SIZE=8
75
-
76
-
77
- # In[ ]:
78
-
79
-
80
- def get_model():
81
- input_ids = tf.keras.layers.Input(
82
- shape=(MAX_LENGTH,), dtype=tf.int32, name="input_ids"
83
- )
84
-
85
- attention_masks = tf.keras.layers.Input(
86
- shape=(MAX_LENGTH,), dtype=tf.int32, name="attention_masks"
87
- )
88
-
89
- deberta_model = transformers.TFAutoModel.from_pretrained("microsoft/deberta-v3-large", config=cfg)
90
-
91
-
92
- REINIT_LAYERS = 1
93
- normal_initializer = tf.keras.initializers.GlorotUniform()
94
- zeros_initializer = tf.keras.initializers.Zeros()
95
- ones_initializer = tf.keras.initializers.Ones()
96
-
97
- # print(f'\nRe-initializing encoder block:')
98
- for encoder_block in deberta_model.deberta.encoder.layer[-REINIT_LAYERS:]:
99
- # print(f'{encoder_block}')
100
- for layer in encoder_block.submodules:
101
- if isinstance(layer, tf.keras.layers.Dense):
102
- layer.kernel.assign(normal_initializer(shape=layer.kernel.shape, dtype=layer.kernel.dtype))
103
- if layer.bias is not None:
104
- layer.bias.assign(zeros_initializer(shape=layer.bias.shape, dtype=layer.bias.dtype))
105
-
106
- elif isinstance(layer, tf.keras.layers.LayerNormalization):
107
- layer.beta.assign(zeros_initializer(shape=layer.beta.shape, dtype=layer.beta.dtype))
108
- layer.gamma.assign(ones_initializer(shape=layer.gamma.shape, dtype=layer.gamma.dtype))
109
-
110
- deberta_output = deberta_model.deberta(
111
- input_ids, attention_mask=attention_masks
112
- )
113
- hidden_states = deberta_output.hidden_states
114
-
115
- #WeightedLayerPool + MeanPool of the last 4 hidden states
116
- stack_meanpool = tf.stack(
117
- [MeanPool()(hidden_s, mask=attention_masks) for hidden_s in hidden_states[-4:]],
118
- axis=2)
119
-
120
- weighted_layer_pool = layers.Dense(1,
121
- use_bias=False,
122
- kernel_constraint=WeightsSumOne())(stack_meanpool)
123
-
124
- weighted_layer_pool = tf.squeeze(weighted_layer_pool, axis=-1)
125
- output=layers.Dense(15,activation='linear')(weighted_layer_pool)
126
- #x = layers.Dense(6, activation='linear')(x)
127
-
128
- #output = layers.Rescaling(scale=4.0, offset=1.0)(x)
129
- model = tf.keras.Model(inputs=[input_ids, attention_masks], outputs=output)
130
-
131
- #Compile model with Layer-wise Learning Rate Decay
132
- layer_list = [deberta_model.deberta.embeddings] + list(deberta_model.deberta.encoder.layer)
133
- layer_list.reverse()
134
-
135
- INIT_LR = 1e-5
136
- LLRDR = 0.9
137
- LR_SCH_DECAY_STEPS = 1600
138
-
139
- lr_schedules = [tf.keras.optimizers.schedules.ExponentialDecay(
140
- initial_learning_rate=INIT_LR * LLRDR ** i,
141
- decay_steps=LR_SCH_DECAY_STEPS,
142
- decay_rate=0.3) for i in range(len(layer_list))]
143
- lr_schedule_head = tf.keras.optimizers.schedules.ExponentialDecay(
144
- initial_learning_rate=1e-4,
145
- decay_steps=LR_SCH_DECAY_STEPS,
146
- decay_rate=0.3)
147
-
148
- optimizers = [tf.keras.optimizers.Adam(learning_rate=lr_sch) for lr_sch in lr_schedules]
149
-
150
- optimizers_and_layers = [(tf.keras.optimizers.Adam(learning_rate=lr_schedule_head), model.layers[-4:])] +\
151
- list(zip(optimizers, layer_list))
152
-
153
- optimizer = tfa.optimizers.MultiOptimizer(optimizers_and_layers)
154
-
155
- model.compile(optimizer=optimizer,
156
- loss='mse',
157
- metrics=[tf.keras.metrics.RootMeanSquaredError()],
158
- )
159
- return model
160
-
161
-
162
-
- # In[ ]:
-
-
- tf.keras.backend.clear_session()
- model = get_model()
- model.load_weights('./best_model_fold2.h5')
-
-
- # In[ ]:
-
-
- # map the integer labels to their original string representation
- label_mapping = {
-     0: 'Greeting',
-     1: 'Curiosity',
-     2: 'Interest',
-     3: 'Obscene',
-     4: 'Annoyed',
-     5: 'Openness',
-     6: 'Anxious',
-     7: 'Acceptance',
-     8: 'Uninterested',
-     9: 'Informative',
-     10: 'Accusatory',
-     11: 'Denial',
-     12: 'Confused',
-     13: 'Disapproval',
-     14: 'Remorse'
- }
-
- # label_strings = [label_mapping[label] for label in labels]
- # print(label_strings)
-
-
- # In[ ]:
-
-
- def inference(texts):
-     prediction = model.predict(deberta_encode([texts]))
-     labels = np.argmax(prediction, axis=1)
-     label_strings = [label_mapping[label] for label in labels]
-     return label_strings[0]
-
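inference() is the end of the removed pipeline: it encodes a single message, runs model.predict to get one row of 15 scores, and maps the argmax back through label_mapping. A minimal usage sketch (the returned label depends on the trained fold-2 weights):

    print(inference("Hello, how are you?"))  # e.g. 'Greeting'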
-
- # # GPT
-
- # In[ ]:
-
-
- import openai
- import os
- import pandas as pd
- import gradio as gr
-
-
- # In[ ]:
-
-
- openai.organization = os.environ['org_id']
- openai.api_key = os.environ['openai_api']
- model_version = "gpt-3.5-turbo"
- model_token_limit = 10
- model_temperature = 0.1
-
-
- # In[ ]:
-
-
- def generatePrompt():
-     labels = ["Openness",
-               "Anxious",
-               "Confused",
-               "Disapproval",
-               "Remorse",
-               "Uninterested",
-               "Accusatory",
-               "Annoyed",
-               "Interest",
-               "Curiosity",
-               "Acceptance",
-               "Obscene",
-               "Denial",
-               "Informative",
-               "Greeting"]
-
-     formatted_labels = ', '.join(labels[:-1]) + ', or ' + labels[-1] + '.'
-
-     label_set = ["Openness", "Anxious", "Confused", "Disapproval", "Remorse", "Accusatory",
-                  "Denial", "Obscene", "Uninterested", "Annoyed", "Informative", "Greeting",
-                  "Interest", "Curiosity", "Acceptance"]
-
-     formatted_labels = ', '.join(label_set[:-1]) + ', or ' + label_set[-1] + '.\n'
-
-     # The basic task to assign GPT (in natural language)
-     base_task = "Classify the following text messages into one of the following categories using one word: " + formatted_labels
-     base_task += "Provide only a one word response. Use only the labels provided.\n"
-
-     return base_task
-
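Note that the first labels/formatted_labels pair above is immediately shadowed by label_set, so the prompt GPT actually receives for a message is generatePrompt() + "Text: " + message, i.e.:

    Classify the following text messages into one of the following categories using one word: Openness, Anxious, Confused, Disapproval, Remorse, Accusatory, Denial, Obscene, Uninterested, Annoyed, Informative, Greeting, Interest, Curiosity, or Acceptance.
    Provide only a one word response. Use only the labels provided.
    Text: <the message to classify>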
-
- # In[ ]:
-
-
- def predict(message):
-     prompt = [{"role": "user", "content": generatePrompt() + "Text: " + message}]
-
-     response = openai.ChatCompletion.create(
-         model=model_version,
-         temperature=model_temperature,
-         max_tokens=model_token_limit,
-         messages=prompt
-     )
-
-     return response["choices"][0]["message"]["content"]
-
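One caveat: predict() (and revision() below) use the pre-1.0 openai module API, but the requirements.txt in this same commit pins openai==1.3.7, where openai.ChatCompletion no longer exists. A sketch of the equivalent call against the 1.x client, reusing generatePrompt() from above:

    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY (and OPENAI_ORG_ID, if set) from the environment

    def predict_v1(message: str) -> str:
        prompt = [{"role": "user", "content": generatePrompt() + "Text: " + message}]
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            temperature=0.1,
            max_tokens=10,
            messages=prompt,
        )
        return response.choices[0].message.content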
-
- # # Update
-
- # In[ ]:
-
-
- model_version = "gpt-3.5-turbo"
- model_token_limit = 2000
- model_temperature = 0.1
-
-
- # In[ ]:
-
-
- def revision(message):
-     base_prompt = "Here is a conversation between a Caller and a Volunteer. The Volunteer is trying to be as non-accusatory as possible but also wants to get as much information about the caller as possible. What should the volunteer say next in this exchange? Provide 3 possible responses."
-
-     prompt = [{"role": "user", "content": base_prompt + message}]
-
-     response = openai.ChatCompletion.create(
-         model=model_version,
-         temperature=model_temperature,
-         max_tokens=model_token_limit,
-         messages=prompt
-     )
-
-     return response["choices"][0]["message"]["content"]
-
-
- # In[ ]:
-
-
- import gradio as gr
-
- def combine(a):
-     return a + "hello"
-
-
- with gr.Blocks() as demo:
-     gr.Markdown("## DeBERTa Sentiment Analysis")
-     gr.Markdown("This is a custom DeBERTa model architecture for sentiment analysis with 15 labels: Openness, Anxious, Confused, Disapproval, Remorse, Accusatory, Denial, Obscene, Uninterested, Annoyed, Informative, Greeting, Interest, Curiosity, or Acceptance.<br />Please enter your sentence(s) in the input box below and click the Submit button. The model will then process the input and provide the sentiment as one of the labels.<br />The Text Examples section below provides some input examples. Click on them and submit them to the model to see how it works.")
-
-     txt = gr.Textbox(label="Input", lines=2)
-     txt_1 = gr.Textbox(value="", label="Output")
-     btn = gr.Button(value="Submit")
-     btn.click(inference, inputs=txt, outputs=txt_1)
-
-     demoExample = [
-         "Hello, how are you?",
-         "I am so happy to be here!",
-         "i don't have time for u"
-     ]
-
-     gr.Markdown("## Text Examples")
-     gr.Examples(
-         demoExample,
-         txt,
-         txt_1,
-         inference
-     )
-
- with gr.Blocks() as gptdemo:
-     gr.Markdown("## GPT Sentiment Analysis")
-     gr.Markdown("This is a custom GPT model for sentiment analysis with 15 labels: Openness, Anxious, Confused, Disapproval, Remorse, Accusatory, Denial, Obscene, Uninterested, Annoyed, Informative, Greeting, Interest, Curiosity, or Acceptance.<br />Please enter your sentence(s) in the input box below and click the Submit button. The model will then process the input and provide the sentiment as one of the labels.<br />The Text Examples section below provides some input examples. Click on them and submit them to the model to see how it works. Please note that the input may be collected by service providers.")
-     txt = gr.Textbox(label="Input", lines=2)
-     txt_1 = gr.Textbox(value="", label="Output")
-     btn = gr.Button(value="Submit")
-     btn.click(predict, inputs=txt, outputs=txt_1)
-
-     gptExample = [
-         "Hello, how are you?",
-         "Are you busy at the moment?",
-         "I'm doing real good"
-     ]
-
-     gr.Markdown("## Text Examples")
-     gr.Examples(
-         gptExample,
-         txt,
-         txt_1,
-         predict
-     )
-
- with gr.Blocks() as revisiondemo:
-     gr.Markdown("## Conversation Revision")
-     gr.Markdown("This is a custom GPT model designed to generate possible response texts based on previous contexts. You can input a conversation between a caller and a volunteer, and the model will provide three possible responses based on the input.<br />The Text Examples section below provides some input examples. Click on them and submit them to the model to see how it works. Please note that the input may be collected by service providers.")
-     txt = gr.Textbox(label="Input", lines=2)
-     txt_1 = gr.Textbox(value="", label="Output", lines=4)
-     btn = gr.Button(value="Submit")
-     btn.click(revision, inputs=txt, outputs=txt_1)
-
-     revisionExample = ["Caller: sup\nVolunteer: Hey, how's it going?\nCaller: not very well, actually\nVolunteer: What's the matter?\nCaller: it's my wife, don't worry about it"]
-
-     with gr.Column():
-         gr.Markdown("## Text Examples")
-         gr.Examples(
-             revisionExample,
-             [txt],
-             txt_1,
-             revision
-         )
-
-
- gr.TabbedInterface([demo, gptdemo, revisiondemo], ["Model", "GPT", "Text Revision"]
-                    ).launch(inline=False)
-
main.py CHANGED
@@ -1,14 +1,17 @@
  #
- # to run app, cd to app directory and do:
- # python ./app.py --active-listener --class-weight --residual
+ # to run app, in root directory do:
+ # python ./main.py
  #
 
  import gradio as gr
- from app import cosmic_view, gpt_view
+ from app import cosmic_view, gpt_view, deberta_view
 
- cosmic_model = cosmic_view.cosmic_ui()
- gpt_model = gpt_view.gpt_ui()
- demo = gr.TabbedInterface([cosmic_model, gpt_model], ["COSMIC", "GPT"])
+ cosmic_tab = cosmic_view.cosmic_ui()
+ gpt_tab = gpt_view.gpt_ui()
+ deberta_tab = deberta_view.deberta_ui()
+ demo = gr.TabbedInterface(
+     [cosmic_tab, gpt_tab, deberta_tab], ["COSMIC", "GPT", "DeBERTa"]
+ )
 
  if __name__ == "__main__":
      demo.launch()
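The deberta_view module imported here is not shown in this diff; by analogy with cosmic_view and gpt_view, its deberta_ui() presumably builds and returns a gr.Blocks wired to the DeBERTa classifier. A hypothetical sketch, with all names and layout assumed:

    import gradio as gr

    def classify(text: str) -> str:
        # stand-in for the DeBERTa inference call wired up in the real module
        return "Greeting"

    def deberta_ui() -> gr.Blocks:
        with gr.Blocks() as block:
            gr.Markdown("## DeBERTa Sentiment Analysis")
            txt = gr.Textbox(label="Input", lines=2)
            out = gr.Textbox(label="Output")
            gr.Button(value="Submit").click(classify, inputs=txt, outputs=out)
        return block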
requirements.txt CHANGED
@@ -1,12 +1,15 @@
+ absl-py==2.0.0
  aiofiles==23.2.1
  altair==5.2.0
  annotated-types==0.6.0
  antlr4-python3-runtime==4.8
  anyio==3.7.1
  asttokens==2.4.1
+ astunparse==1.6.3
  attrs==23.1.0
  bitarray==2.8.3
  blis==0.7.11
+ cachetools==5.3.2
  catalogue==2.0.10
  certifi==2023.11.17
  cffi==1.16.0
@@ -30,12 +33,19 @@ fairseq==0.12.2
  fastapi==0.104.1
  ffmpy==0.3.1
  filelock==3.13.1
+ flatbuffers==23.5.26
  fonttools==4.45.1
  fsspec==2023.10.0
  ftfy==6.1.3
+ gast==0.4.0
+ google-auth==2.25.1
+ google-auth-oauthlib==1.0.0
+ google-pasta==0.2.0
  gradio==4.7.1
  gradio_client==0.7.0
+ grpcio==1.60.0
  h11==0.14.0
+ h5py==3.10.0
  httpcore==1.0.2
  httpx==0.25.2
  huggingface-hub==0.19.4
@@ -44,6 +54,7 @@ idna==3.6
  importlib-resources==6.1.1
  ipykernel==6.27.1
  ipython==8.18.1
+ jax==0.4.21
  jedi==0.19.1
  Jinja2==3.1.2
  joblib==1.3.2
@@ -51,22 +62,29 @@ jsonschema==4.20.0
  jsonschema-specifications==2023.11.2
  jupyter_client==8.6.0
  jupyter_core==5.5.0
+ kaleido==0.2.1
+ keras==2.12.0
  kiwisolver==1.4.5
  langcodes==3.3.0
+ libclang==16.0.6
  lxml==4.9.3
+ Markdown==3.5.1
  markdown-it-py==3.0.0
  MarkupSafe==2.1.3
  matplotlib==3.8.2
  matplotlib-inline==0.1.6
  mdurl==0.1.2
+ ml-dtypes==0.2.0
  mpmath==1.3.0
  murmurhash==1.0.10
  nest-asyncio==1.5.8
  networkx==3.2.1
  nltk==3.8.1
- numpy==1.26.2
+ numpy==1.23.5
+ oauthlib==3.2.2
  omegaconf==2.0.6
  openai==1.3.7
+ opt-einsum==3.3.0
  orjson==3.9.10
  packaging==23.2
  pandas==2.1.3
@@ -76,8 +94,11 @@ platformdirs==4.0.0
  portalocker==2.8.2
  preshed==3.0.9
  prompt-toolkit==3.0.41
+ protobuf==4.23.4
  psutil==5.9.6
  pure-eval==0.2.2
+ pyasn1==0.5.1
+ pyasn1-modules==0.3.0
  pycparser==2.21
  pydantic==2.5.2
  pydantic_core==2.14.5
@@ -88,15 +109,22 @@ python-dateutil==2.8.2
  python-dotenv==1.0.0
  python-multipart==0.0.6
  pytz==2023.3.post1
+ pywin32==306
  PyYAML==6.0.1
  pyzmq==25.1.1
  referencing==0.31.1
  regex==2023.10.3
  requests==2.31.0
+ requests-oauthlib==1.3.1
  rich==13.7.0
  rpds-py==0.13.2
+ rsa==4.9
  sacrebleu==2.3.3
+ safetensors==0.4.1
+ scipy==1.11.4
+ seaborn==0.13.0
  semantic-version==2.10.0
+ sentencepiece==0.1.99
  shellingham==1.5.4
  six==1.16.0
  smart-open==6.4.0
@@ -109,22 +137,36 @@ stack-data==0.6.3
  starlette==0.27.0
  sympy==1.12
  tabulate==0.9.0
+ tensorboard==2.12.3
+ tensorboard-data-server==0.7.2
+ tensorflow==2.12.0
+ tensorflow-addons==0.22.0
+ tensorflow-estimator==2.12.0
+ tensorflow-intel==2.12.0
+ tensorflow-io-gcs-filesystem==0.31.0
+ termcolor==2.4.0
+ textblob==0.17.1
  thinc==8.2.1
+ tokenizers==0.15.0
  tomlkit==0.12.0
  toolz==0.12.0
- torch==2.1.1
+ torch==2.1.1+cu121
  torchaudio==2.1.1
- torchvision==0.16.1
+ torchvision==0.16.1+cu121
  tornado==6.4
  tqdm==4.66.1
  traitlets==5.14.0
+ transformers==4.35.2
+ typeguard==2.13.3
  typer==0.9.0
  typing_extensions==4.8.0
  tzdata==2023.3
  urllib3==2.1.0
  uvicorn==0.24.0.post1
+ vega-datasets==0.9.0
  wasabi==1.1.2
  wcwidth==0.2.12
  weasel==0.3.4
  websockets==11.0.3
- seaborn==0.13.0
+ Werkzeug==3.0.1
+ wrapt==1.14.1
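Two of the new pins interact: numpy drops from 1.26.2 to 1.23.5, which fits the numpy<1.24 constraint of the newly added tensorflow==2.12.0, and torch/torchvision now carry +cu121 local versions, so they install from the PyTorch CUDA wheel index rather than plain PyPI, typically via something like:

    pip install -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu121

The pywin32 and tensorflow-intel pins also suggest this environment was frozen on a Windows machine.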