devnote5676 committed
Commit 920e708 • 1 Parent(s): a970d46

Upload SOCKET.py

Files changed (1)
  1. SOCKET.py +398 -0
SOCKET.py ADDED
@@ -0,0 +1,398 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """The SOCKET Datasets"""
+
+
+ import datasets
+
+
+ _CITATION = """
+ @misc{choi2023llms,
+       title={Do LLMs Understand Social Knowledge? Evaluating the Sociability of Large Language Models with SocKET Benchmark},
+       author={Minje Choi and Jiaxin Pei and Sagar Kumar and Chang Shu and David Jurgens},
+       year={2023},
+       eprint={2305.14938},
+       archivePrefix={arXiv},
+       primaryClass={cs.CL}
+ }
+ """
+
+ _DESCRIPTION = """\
+ A unified evaluation benchmark dataset for evaluating the sociability of NLP models.
+ """
+
+ _HOMEPAGE = "TBD"
+
+ _LICENSE = ""
+
+ # Set up the data URL or the local file directory here (the remote URL below overrides the local path).
+ URL = "SOCKET_DATA/"
+ URL = "https://huggingface.co/datasets/Blablablab/SOCKET/tree/main/SOCKET_DATA/"
+
+ TASK_DICT = {
+     'humor_sarcasm': [
+         'hahackathon#humor_rating',
+         'humor-pairs',
+         'sarc',
+         'tweet_irony',
+         'hahackathon#is_humor',
+     ],
+     'offensive': [
+         'contextual-abuse#IdentityDirectedAbuse',
+         'contextual-abuse#PersonDirectedAbuse',
+         'hahackathon#offense_rating',
+         'hasbiasedimplication',
+         'hateoffensive',
+         'implicit-hate#explicit_hate',
+         'implicit-hate#implicit_hate',
+         'implicit-hate#incitement_hate',
+         'implicit-hate#inferiority_hate',
+         'implicit-hate#stereotypical_hate',
+         'implicit-hate#threatening_hate',
+         'implicit-hate#white_grievance_hate',
+         'intentyn',
+         'jigsaw#severe_toxic',
+         'jigsaw#identity_hate',
+         'jigsaw#threat',
+         'jigsaw#obscene',
+         'jigsaw#insult',
+         'jigsaw#toxic',
+         'offensiveyn',
+         'sexyn',
+         'talkdown-pairs',
+         'toxic-span',
+         'tweet_offensive'
+     ],
+     'sentiment_emotion': [
+         'crowdflower',
+         'dailydialog',
+         'emobank#arousal',
+         'emobank#dominance',
+         'emobank#valence',
+         'emotion-span',
+         'empathy#distress',
+         'empathy#distress_bin',
+         'same-side-pairs',
+         'sentitreebank',
+         'tweet_emoji',
+         'tweet_emotion',
+         'tweet_sentiment'
+     ],
+     'social_factors': [
+         'complaints',
+         'empathy#empathy',
+         'empathy#empathy_bin',
+         'hayati_politeness',
+         'questionintimacy',
+         'stanfordpoliteness'
+     ],
+     'trustworthy': [
+         'bragging#brag_achievement',
+         'bragging#brag_action',
+         'bragging#brag_possession',
+         'bragging#brag_trait',
+         'hypo-l',
+         'neutralizing-bias-pairs',
+         'propaganda-span',
+         'rumor#rumor_bool',
+         'two-to-lie#receiver_truth',
+         'two-to-lie#sender_truth',
+     ]
+ }
+
+ task2category = {}
+ for category, tasks in TASK_DICT.items():
+     for task in tasks:
+         task2category[task] = category
+
+ TASK_NAMES = []
+ for tasks in TASK_DICT.values():
+     TASK_NAMES.extend(tasks)
+ TASK_NAMES = sorted(TASK_NAMES)
+
+ print(len(TASK_NAMES))
+ _URLs = {}
+ for task in TASK_NAMES:
+     _URLs[task] = {}
+     for s in ['train', 'test', 'val']:
+         for t in ['text', 'labels']:
+             task_url = '%s%s/%s_%s.txt' % (URL, task, s, t)
+             task_url = task_url.replace('#', '%23')
+             _URLs[task][s + '_' + t] = task_url
+
+ class SOCKETConfig(datasets.BuilderConfig):
+     def __init__(self, *args, type=None, sub_type=None, **kwargs):
+         super().__init__(
+             *args,
+             name=f"{type}",
+             **kwargs,
+         )
+         self.type = type
+         self.sub_type = sub_type
+
+
+ class SOCKET(datasets.GeneratorBasedBuilder):
+     """SOCKET Dataset."""
+
+     BUILDER_CONFIGS = [
+         SOCKETConfig(
+             type=key,
+             sub_type=None,
+             version=datasets.Version("1.1.0"),
+             description=f"This part of the dataset covers the {key} part of the SocialEval Dataset.",
+         )
+         for key in list(TASK_NAMES)
+     ]
+
+     def _info(self):
+         if self.config.type == "questionintimacy":
+             names = ['Very-intimate', 'Intimate', 'Somewhat-intimate', 'Not-very-intimate', 'Not-intimate', 'Not-intimate-at-all']
+         elif self.config.type == "sexyn":
+             names = ['not sexism', 'sexism']
+         elif self.config.type == "intentyn":
+             names = ['not intentional', 'intentional']
+         elif self.config.type == "offensiveyn":
+             names = ['not offensive', 'offensive']
+         elif self.config.type == "hasbiasedimplication":
+             names = ['not biased', 'biased']
+         elif self.config.type == "trofi":
+             names = ['metaphor', 'non-metaphor']
+         elif self.config.type == "sentitreebank":
+             names = ['positive', 'negative']
+         elif self.config.type == "sarc":
+             names = ['sarcastic', 'literal']
+         elif self.config.type == "stanfordpoliteness":
+             names = ['polite', 'impolite']
+         elif self.config.type == "sarcasmghosh":
+             names = ['sarcastic', 'literal']
+         elif self.config.type == "dailydialog":
+             names = ['noemotion', 'anger', 'disgust', 'fear', 'happiness', 'sadness', 'surprise']
+         elif self.config.type == "shortromance":
+             names = ['romantic', 'literal']
+         elif self.config.type == "crowdflower":
+             names = ['empty', 'sadness', 'enthusiasm', 'neutral', 'worry', 'love', 'fun', 'hate', 'happiness', 'relief', 'boredom', 'surprise', 'anger']
+         elif self.config.type == "vua":
+             names = ['metaphor', 'non-metaphor']
+         elif self.config.type == "shorthumor":
+             names = ['humorous', 'literal']
+         elif self.config.type == "shortjokekaggle":
+             names = ['humorous', 'literal']
+         elif self.config.type == "hateoffensive":
+             names = ['hate', 'offensive', 'neither']
+         elif self.config.type == "emobank#valence":
+             names = ['valence(positive)']
+         elif self.config.type == "emobank#arousal":
+             names = ['arousal(excited)']
+         elif self.config.type == "emobank#dominance":
+             names = ['dominance(being_in_control)']
+         elif self.config.type == "hayati_politeness":
+             names = ['impolite', 'polite']
+         elif self.config.type == "jigsaw#toxic":
+             names = ['not toxic', 'toxic']
+         elif self.config.type == "jigsaw#severe_toxic":
+             names = ['not severe toxic', 'severe toxic']
+         elif self.config.type == "jigsaw#obscene":
+             names = ['not obscene', 'obscene']
+         elif self.config.type == "jigsaw#threat":
+             names = ['not threat', 'threat']
+         elif self.config.type == "jigsaw#insult":
+             names = ['not insult', 'insult']
+         elif self.config.type == "jigsaw#identity_hate":
+             names = ['not identity hate', 'identity hate']
+         elif self.config.type == "standup-comedy":
+             names = ['not funny', 'funny']
+         elif self.config.type == "complaints":
+             names = ['not complaint', 'complaint']
+         elif self.config.type == "hypo-l":
+             names = ['not hyperbole', 'hyperbole']
+         elif self.config.type == "bragging#brag_action":
+             names = ['not action bragging', 'action bragging']
+         elif self.config.type == "bragging#brag_feeling":
+             names = ['not feeling bragging', 'feeling bragging']
+         elif self.config.type == "bragging#brag_achievement":
+             names = ['not achievement bragging', 'achievement bragging']
+         elif self.config.type == "bragging#brag_possession":
+             names = ['not possession bragging', 'possession bragging']
+         elif self.config.type == "bragging#brag_trait":
+             names = ['not trait bragging', 'trait bragging']
+         elif self.config.type == "bragging#brag_affiliation":
+             names = ['not affiliation bragging', 'affiliation bragging']
+         elif self.config.type == "contextual-abuse#IdentityDirectedAbuse":
+             names = ['not identity directed abuse', 'identity directed abuse']
+         elif self.config.type == "contextual-abuse#AffiliationDirectedAbuse":
+             names = ['not affiliation directed abuse', 'affiliation directed abuse']
+         elif self.config.type == "contextual-abuse#PersonDirectedAbuse":
+             names = ['not person directed abuse', 'person directed abuse']
+         elif self.config.type == "contextual-abuse#CounterSpeech":
+             names = ['not counter speech', 'counter speech']
+         elif self.config.type == "hahackathon#is_humor":
+             names = ['not humor', 'humor']
+         elif self.config.type == "hahackathon#humor_rating":
+             names = ['humor rating']
+         elif self.config.type == "hahackathon#offense_rating":
+             names = ['offense rating']
+         elif self.config.type == "check_worthiness":
+             names = ['not check-worthy', 'check-worthy']
+         elif self.config.type == "rumor#rumor_tf":
+             names = ['not rumor tf', 'rumor tf']
+         elif self.config.type == "rumor#rumor_bool":
+             names = ['not rumor', 'rumor']
+         elif self.config.type == "two-to-lie#deception":
+             names = ['not deception', 'deception']
+         elif self.config.type == "two-to-lie#sender_truth":
+             names = ['lie', 'truth']
+         elif self.config.type == "two-to-lie#receiver_truth":
+             names = ['lie', 'truth']
+         elif self.config.type == "deceitful-reviews#true_rumor":
+             names = ['fake review', 'true review']
+         elif self.config.type == "deceitful-reviews#positive":
+             names = ['negative', 'positive']
+         elif self.config.type == "empathy#empathy":
+             names = ['empathy']
+         elif self.config.type == "empathy#distress":
+             names = ['distress']
+         elif self.config.type == "empathy#empathy_bin":
+             names = ['not empathy', 'empathy']
+         elif self.config.type == "empathy#distress_bin":
+             names = ['not distress', 'distress bin']
+         elif self.config.type == "implicit-hate#explicit_hate":
+             names = ['not explicit hate', 'explicit hate']
+         elif self.config.type == "implicit-hate#implicit_hate":
+             names = ['not implicit hate', 'implicit hate']
+         elif self.config.type == "implicit-hate#threatening_hate":
+             names = ['not threatening hate', 'threatening hate']
+         elif self.config.type == "implicit-hate#irony_hate":
+             names = ['not irony hate', 'irony hate']
+         elif self.config.type == "implicit-hate#other_hate":
+             names = ['not other hate', 'other hate']
+         elif self.config.type == "implicit-hate#incitement_hate":
+             names = ['not incitement hate', 'incitement hate']
+         elif self.config.type == "implicit-hate#inferiority_hate":
+             names = ['not inferiority hate', 'inferiority hate']
+         elif self.config.type == "implicit-hate#stereotypical_hate":
+             names = ['not stereotypical hate', 'stereotypical hate']
+         elif self.config.type == "implicit-hate#white_grievance_hate":
+             names = ['not white grievance hate', 'white grievance hate']
+         elif self.config.type == "waseem_and_hovy#sexism":
+             names = ['not sexism', 'sexism']
+         elif self.config.type == "waseem_and_hovy#racism":
+             names = ['not racism', 'racism']
+         elif self.config.type == "humor-pairs":
+             names = ['the first sentence is funnier', 'the second sentence is funnier']
+         elif self.config.type == "neutralizing-bias-pairs":
+             names = ['the first sentence is biased', 'the second sentence is biased']
+         elif self.config.type == "same-side-pairs":
+             names = ['not same side', 'same side']
+         elif self.config.type == "talkdown-pairs":
+             names = ['not condescension', 'condescension']
+         elif self.config.type == "tweet_sentiment":
+             names = ["negative", "neutral", "positive"]
+         elif self.config.type == "tweet_offensive":
+             names = ["not offensive", "offensive"]
+         elif self.config.type == "tweet_irony":
+             names = ["not irony", "irony"]
+         elif self.config.type == "tweet_hate":
+             names = ["not hate", "hate"]
+         elif self.config.type == "tweet_emoji":
+             names = [
+                 "❤",
+                 "😍",
+                 "😂",
+                 "💕",
+                 "🔥",
+                 "😊",
+                 "😎",
+                 "✨",
+                 "💙",
+                 "😘",
+                 "📷",
+                 "🇺🇸",
+                 "☀",
+                 "💜",
+                 "😉",
+                 "💯",
+                 "😁",
+                 "🎄",
+                 "📸",
+                 "😜",
+             ]
+
+         elif self.config.type == "tweet_emotion":
+             names = ["anger", "joy", "optimism", "sadness"]
+         elif self.config.type == "emotion-span":
+             names = ['cause']
+             label_type = datasets.Sequence(feature={n: datasets.Value(dtype='string', id=None) for n in names})
+             print(label_type)
+         elif self.config.type == "propaganda-span":
+             names = ['propaganda']
+             label_type = datasets.Sequence(feature={n: datasets.Value(dtype='string', id=None) for n in names})
+         elif self.config.type == "toxic-span":
+             names = ['toxic']
+             label_type = datasets.Sequence(feature={n: datasets.Value(dtype='string', id=None) for n in names})
+
+         if self.config.type[-4:] == 'span':
+             label_type = label_type  # datasets.Sequence(feature={n: datasets.Value(dtype='string') for n in names})
+         elif len(names) > 1:
+             label_type = datasets.features.ClassLabel(names=names)
+         else:
+             label_type = datasets.Value("float32")
+
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {"text": datasets.Value("string"),
+                  "label": label_type}
+             ),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         my_urls = _URLs[self.config.type]
+         data_dir = dl_manager.download_and_extract(my_urls)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"text_path": data_dir["train_text"], "labels_path": data_dir["train_labels"]},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"text_path": data_dir["test_text"], "labels_path": data_dir["test_labels"]},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"text_path": data_dir["val_text"], "labels_path": data_dir["val_labels"]},
+             ),
+         ]
+
+     def _generate_examples(self, text_path, labels_path):
+         """Yields examples."""
+
+         with open(text_path, encoding="utf-8") as f:
+             texts = f.readlines()
+             print(len(texts))
+         with open(labels_path, encoding="utf-8") as f:
+             labels = f.readlines()
+             print(len(labels))
+
+         for i, text in enumerate(texts):
+             yield i, {"text": text.strip(), "label": labels[i].strip() if self.config.type[-4:] != 'span' else eval(labels[i])}
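
For reference, a minimal usage sketch of loading one SOCKET task with this script. The repository id "Blablablab/SOCKET" is taken from the URL defined above and "complaints" is one of the TASK_NAMES entries; the trust_remote_code flag is an assumption about newer `datasets` releases, which require it before running a repository's loading script.

from datasets import load_dataset

# "complaints" is one of the task configs defined in TASK_NAMES above.
# trust_remote_code=True may be required by recent `datasets` versions
# to execute a repository-provided loading script such as SOCKET.py.
socket_task = load_dataset("Blablablab/SOCKET", "complaints", trust_remote_code=True)

print(socket_task)              # DatasetDict with train / validation / test splits
print(socket_task["train"][0])  # {"text": ..., "label": ...}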