system (HF staff) committed on
Commit 3a142fd (0 parents)

Update files from the datasets library (from 1.0.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
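
Note: the entries above follow the standard .gitattributes format, a path pattern followed by whitespace-separated attributes, where filter=lfs diff=lfs merge=lfs routes matching files through Git LFS and -text disables text normalization. A minimal parsing sketch (the parse_gitattributes helper and the assumption of running from the repository root are illustrative, not part of this commit):

from pathlib import Path

def parse_gitattributes(text):
    """Parse .gitattributes lines into (pattern, {attribute: value}) pairs."""
    rules = []
    for line in text.splitlines():
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        pattern, *attrs = line.split()
        parsed = {}
        for attr in attrs:
            if attr.startswith("-"):  # e.g. "-text" unsets the attribute
                parsed[attr[1:]] = False
            elif "=" in attr:  # e.g. "filter=lfs" sets a value
                key, value = attr.split("=", 1)
                parsed[key] = value
            else:  # bare attribute
                parsed[attr] = True
        rules.append((pattern, parsed))
    return rules

rules = parse_gitattributes(Path(".gitattributes").read_text())
print(rules[0])  # ('*.7z', {'filter': 'lfs', 'diff': 'lfs', 'merge': 'lfs', 'text': False})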
clue.py ADDED
@@ -0,0 +1,525 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """A Chinese Language Understanding Evaluation Benchmark (CLUE) benchmark."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import json
+ import os
+ import re
+ import textwrap
+
+ import six
+
+ import datasets
+
+
+ _CLUE_CITATION = """\
+ @misc{xu2020clue,
+ title={CLUE: A Chinese Language Understanding Evaluation Benchmark},
+ author={Liang Xu and Xuanwei Zhang and Lu Li and Hai Hu and Chenjie Cao and Weitang Liu and Junyi Li and Yudong Li and Kai Sun and Yechen Xu and Yiming Cui and Cong Yu and Qianqian Dong and Yin Tian and Dian Yu and Bo Shi and Jun Zeng and Rongzhao Wang and Weijian Xie and Yanting Li and Yina Patterson and Zuoyu Tian and Yiwen Zhang and He Zhou and Shaoweihua Liu and Qipeng Zhao and Cong Yue and Xinrui Zhang and Zhengliang Yang and Zhenzhong Lan},
+ year={2020},
+ eprint={2004.05986},
+ archivePrefix={arXiv},
+ primaryClass={cs.CL}
+ }
+ """
+
+ _CLUE_DESCRIPTION = """\
+ CLUE, A Chinese Language Understanding Evaluation Benchmark
+ (https://www.cluebenchmarks.com/) is a collection of resources for training,
+ evaluating, and analyzing Chinese language understanding systems.
+
+ """
+
+
+ class ClueConfig(datasets.BuilderConfig):
+     """BuilderConfig for CLUE."""
+
+     def __init__(
+         self,
+         data_url,
+         text_features=None,
+         label_column=None,
+         data_dir="",
+         citation="",
+         url="",
+         label_classes=None,
+         process_label=lambda x: x,
+         **kwargs,
+     ):
+         """BuilderConfig for CLUE.
+
+         Args:
+           text_features: `dict[string, string]`, map from the name of the feature
+             dict for each text field to the name of the column in the tsv file
+           label_column: `string`, name of the column in the tsv file corresponding
+             to the label
+           data_url: `string`, url to download the zip file from
+           data_dir: `string`, the path to the folder containing the tsv files in the
+             downloaded zip
+           citation: `string`, citation for the data set
+           url: `string`, url for information about the data set
+           label_classes: `list[string]`, the list of classes if the label is
+             categorical. If not provided, then the label will be of type
+             `datasets.Value('float32')`.
+           process_label: `Function[string, any]`, function taking in the raw value
+             of the label and processing it to the form required by the label feature
+           **kwargs: keyword arguments forwarded to super.
+         """
+         super(ClueConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
+         self.text_features = text_features
+         self.label_column = label_column
+         self.label_classes = label_classes
+         self.data_url = data_url
+         self.data_dir = data_dir
+         self.citation = citation
+         self.url = url
+         self.process_label = process_label
+
+
+ class Clue(datasets.GeneratorBasedBuilder):
+     """A Chinese Language Understanding Evaluation Benchmark (CLUE) benchmark."""
+
+     BUILDER_CONFIGS = [
+         ClueConfig(
+             name="afqmc",
+             description=textwrap.dedent(
+                 """\
+             Ant Financial Question Matching Corpus is a dataset for Chinese
+             question matching (similar to QQP).
+             """
+             ),
+             text_features={"sentence1": "sentence1", "sentence2": "sentence2"},
+             label_classes=["0", "1"],
+             label_column="label",
+             data_url="https://storage.googleapis.com/cluebenchmark/tasks/afqmc_public.zip",
+             url="https://dc.cloud.alipay.com/index#/topic/data?id=8",
+         ),
+         ClueConfig(
+             name="tnews",
+             description=textwrap.dedent(
+                 """\
+             Toutiao Short Text Classification for News is a dataset for Chinese
+             short news classification.
+             """
+             ),
+             text_features={"sentence": "sentence"},
+             label_classes=[
+                 "100",
+                 "101",
+                 "102",
+                 "103",
+                 "104",
+                 "106",
+                 "107",
+                 "108",
+                 "109",
+                 "110",
+                 "112",
+                 "113",
+                 "114",
+                 "115",
+                 "116",
+             ],
+             label_column="label",
+             data_url="https://storage.googleapis.com/cluebenchmark/tasks/tnews_public.zip",
+             url="https://github.com/skdjfla/toutiao-text-classfication-dataset",
+         ),
+         ClueConfig(
+             name="iflytek",
+             description=textwrap.dedent(
+                 """\
+             IFLYTEK Long Text Classification for News is a dataset for Chinese
+             long text classification. The text is crawled from an app market.
+             """
+             ),
+             text_features={"sentence": "sentence"},
+             label_classes=[str(label) for label in range(119)],
+             label_column="label",
+             data_url="https://storage.googleapis.com/cluebenchmark/tasks/iflytek_public.zip",
+         ),
+         ClueConfig(
+             name="cmnli",
+             description=textwrap.dedent(
+                 """\
+             Chinese Multi-Genre NLI is a dataset for Chinese Natural Language
+             Inference. It consists of XNLI (Chinese subset) and translated MNLI.
+             """
+             ),
+             text_features={"sentence1": "sentence1", "sentence2": "sentence2"},
+             label_classes=["neutral", "entailment", "contradiction"],
+             label_column="label",
+             data_url="https://storage.googleapis.com/cluebenchmark/tasks/cmnli_public.zip",
+             data_dir="cmnli_public",
+         ),
+         ClueConfig(
+             name="cluewsc2020",
+             description=textwrap.dedent(
+                 """\
+             CLUE Winograd Scheme Challenge (CLUEWSC 2020) is a Chinese WSC dataset.
+             The text is from contemporary literature and annotated by human experts.
+             The task is to determine which noun the pronoun in the sentence refers to.
+             The question appears in the form of true and false discrimination.
+             """
+             ),
+             text_features={"text": "text", "target": "target"},
+             label_classes=["false", "true"],
+             label_column="label",
+             data_url="https://storage.googleapis.com/cluebenchmark/tasks/cluewsc2020_public.zip",
+         ),
+         ClueConfig(
+             name="csl",
+             description=textwrap.dedent(
+                 """\
+             Chinese Scientific Literature Dataset (CSL) is taken from the abstracts of
+             Chinese papers and their keywords. The papers are selected from some core
+             journals of Chinese social sciences and natural sciences. TF-IDF is used to
+             generate a mixture of fake keywords and real keywords in the paper to construct
+             abstract-keyword pairs. The task goal is to judge whether the keywords are
+             all real keywords based on the abstract.
+             """
+             ),
+             text_features={"abst": "abst", "keyword": "keyword", "corpus_id": "id"},
+             label_classes=["0", "1"],
+             label_column="label",
+             data_url="https://storage.googleapis.com/cluebenchmark/tasks/csl_public.zip",
+             url="https://github.com/P01son6415/CSL",
+         ),
+         ClueConfig(
+             name="cmrc2018",
+             description=textwrap.dedent(
+                 """\
+             CMRC2018 is the first Chinese Span-Extraction Machine Reading Comprehension
+             Dataset. The task requires to set up a system that reads context,
+             question and extract the answer from the context (the answer is a continuous
+             span in the context).
+             """
+             ),
+             data_url="https://storage.googleapis.com/cluebenchmark/tasks/cmrc2018_public.zip",
+             url="https://hfl-rc.github.io/cmrc2018/",
+             citation=textwrap.dedent(
+                 """\
+             @article{cmrc2018-dataset,
+             title={A Span-Extraction Dataset for Chinese Machine Reading Comprehension},
+             author={Cui, Yiming and Liu, Ting and Xiao, Li and Chen, Zhipeng and Ma, Wentao and Che, Wanxiang and Wang, Shijin and Hu, Guoping},
+             journal={arXiv preprint arXiv:1810.07366},
+             year={2018}
+             }"""
+             ),
+         ),
+         ClueConfig(
+             name="drcd",
+             description=textwrap.dedent(
+                 """\
+             Delta Reading Comprehension Dataset (DRCD) belongs to the general field of traditional
+             Chinese machine reading comprehension data set. This data set is expected to become a
+             standard Chinese reading comprehension data set suitable for transfer learning.
+             """
+             ),
+             data_url="https://storage.googleapis.com/cluebenchmark/tasks/drcd_public.zip",
+             url="https://github.com/DRCKnowledgeTeam/DRCD",
+         ),
+         ClueConfig(
+             name="chid",
+             description=textwrap.dedent(
+                 """\
+             Chinese IDiom Dataset for Cloze Test (CHID) contains many masked idioms in the text.
+             The candidates contain similar idioms to the real ones.
+             """
+             ),
+             text_features={"candidates": "candidates", "content": "content"},
+             data_url="https://storage.googleapis.com/cluebenchmark/tasks/chid_public.zip",
+             url="https://arxiv.org/abs/1906.01265",
+             citation=textwrap.dedent(
+                 """\
+             @article{Zheng_2019,
+             title={ChID: A Large-scale Chinese IDiom Dataset for Cloze Test},
+             url={http://dx.doi.org/10.18653/v1/P19-1075},
+             DOI={10.18653/v1/p19-1075},
+             journal={Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics},
+             publisher={Association for Computational Linguistics},
+             author={Zheng, Chujie and Huang, Minlie and Sun, Aixin},
+             year={2019}
+             }"""
+             ),
+         ),
+         ClueConfig(
+             name="c3",
+             description=textwrap.dedent(
+                 """\
+             Multiple-Choice Chinese Machine Reading Comprehension (C3, or C^3) is a Chinese
+             multi-choice reading comprehension data set, including mixed type data sets
+             such as dialogue and long text. Both the training and validation sets are
+             the concatenation of the dialogue and long-text subsets.
+             """
+             ),
+             text_features={"candidates": "candidates", "content": "content"},
+             data_url="https://storage.googleapis.com/cluebenchmark/tasks/c3_public.zip",
+             url="https://arxiv.org/abs/1904.09679",
+             citation=textwrap.dedent(
+                 """\
+             @article{sun2020investigating,
+             author = {Kai Sun and
+             Dian Yu and
+             Dong Yu and
+             Claire Cardie},
+             title = {Investigating Prior Knowledge for Challenging Chinese Machine Reading
+             Comprehension},
+             journal = {Trans. Assoc. Comput. Linguistics},
+             volume = {8},
+             pages = {141--155},
+             year = {2020},
+             url = {https://transacl.org/ojs/index.php/tacl/article/view/1882}
+             }"""
+             ),
+         ),
+         ClueConfig(
+             name="diagnostics",
+             description=textwrap.dedent(
+                 """\
+             Diagnostic set, used to evaluate the performance of different models on 9 Chinese language
+             phenomena summarized by linguists.
+
+             Use the model trained on CMNLI to directly predict the result on this diagnostic set.
+             """
+             ),
+             text_features={"sentence1": "premise", "sentence2": "hypothesis"},
+             label_classes=["neutral", "entailment", "contradiction"],
+             label_column="label",
+             data_url="https://storage.googleapis.com/cluebenchmark/tasks/clue_diagnostics_public.zip",
+         ),
+     ]
+
+     def _info(self):
+         if self.config.name in ["afqmc", "tnews", "iflytek", "cmnli", "diagnostics"]:
+             features = {
+                 text_feature: datasets.Value("string") for text_feature in six.iterkeys(self.config.text_features)
+             }
+             if self.config.label_classes:
+                 features["label"] = datasets.features.ClassLabel(names=self.config.label_classes)
+             else:
+                 features["label"] = datasets.Value("float32")
+             features["idx"] = datasets.Value("int32")
+         elif self.config.name == "cluewsc2020":
+             features = {
+                 "idx": datasets.Value("int32"),
+                 "text": datasets.Value("string"),
+                 "label": datasets.features.ClassLabel(names=["true", "false"]),
+                 "target": {
+                     "span1_text": datasets.Value("string"),
+                     "span2_text": datasets.Value("string"),
+                     "span1_index": datasets.Value("int32"),
+                     "span2_index": datasets.Value("int32"),
+                 },
+             }
+         elif self.config.name == "csl":
+             features = {
+                 "idx": datasets.Value("int32"),
+                 "corpus_id": datasets.Value("int32"),
+                 "abst": datasets.Value("string"),
+                 "label": datasets.features.ClassLabel(names=self.config.label_classes),
+                 "keyword": datasets.Sequence(datasets.Value("string")),
+             }
+         elif self.config.name in ["cmrc2018", "drcd"]:
+             features = {
+                 "id": datasets.Value("string"),
+                 "context": datasets.Value("string"),
+                 "question": datasets.Value("string"),
+                 "answers": datasets.Sequence(
+                     {
+                         "text": datasets.Value("string"),
+                         "answer_start": datasets.Value("int32"),
+                     }
+                 ),
+             }
+         elif self.config.name == "chid":
+             features = {
+                 "idx": datasets.Value("int32"),
+                 "candidates": datasets.Sequence(datasets.Value("string")),
+                 "content": datasets.Sequence(datasets.Value("string")),
+                 "answers": datasets.features.Sequence(
+                     {
+                         "text": datasets.Value("string"),
+                         "candidate_id": datasets.Value("int32"),
+                     }
+                 ),
+             }
+         elif self.config.name == "c3":
+             features = {
+                 "id": datasets.Value("int32"),
+                 "context": datasets.Sequence(datasets.Value("string")),
+                 "question": datasets.Value("string"),
+                 "choice": datasets.Sequence(datasets.Value("string")),
+                 "answer": datasets.Value("string"),
+             }
+         else:
+             raise NotImplementedError(
+                 "This task is not implemented. If you believe"
+                 " this task was recently added to the CLUE benchmark, "
+                 "please open a GitHub issue and we will add it."
+             )
+
+         return datasets.DatasetInfo(
+             description=_CLUE_DESCRIPTION,
+             features=datasets.Features(features),
+             homepage=self.config.url,
+             citation=self.config.citation + "\n" + _CLUE_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         dl_dir = dl_manager.download_and_extract(self.config.data_url)
+         data_dir = os.path.join(dl_dir, self.config.data_dir)
+         test_split = datasets.SplitGenerator(
+             name=datasets.Split.TEST,
+             gen_kwargs={
+                 "data_file": os.path.join(
+                     data_dir, "test.json" if self.config.name != "diagnostics" else "diagnostics_test.json"
+                 ),
+                 "split": "test",
+             },
+         )
+
+         split_list = [test_split]
+
+         if self.config.name != "diagnostics":
+             train_split = datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "data_file": os.path.join(
+                         data_dir or "", "train.json" if self.config.name != "c3" else "d-train.json"
+                     ),
+                     "split": "train",
+                 },
+             )
+             val_split = datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "data_file": os.path.join(
+                         data_dir or "", "dev.json" if self.config.name != "c3" else "d-dev.json"
+                     ),
+                     "split": "dev",
+                 },
+             )
+             split_list += [train_split, val_split]
+
+         if self.config.name == "cmrc2018":
+             split_list.append(
+                 datasets.SplitGenerator(
+                     name=datasets.Split("trial"),
+                     gen_kwargs={
+                         "data_file": os.path.join(data_dir or "", "trial.json"),
+                         "split": "trial",
+                     },
+                 )
+             )
+
+         return split_list
+
+     def _generate_examples(self, data_file, split):
+         process_label = self.config.process_label
+         label_classes = self.config.label_classes
+
+         if self.config.name == "chid" and split != "test":
+             answer_file = os.path.join(os.path.dirname(data_file), "{}_answer.json".format(split))
+             answer_dict = json.load(open(answer_file, encoding="utf8"))
+
+         if self.config.name == "c3":
+             if split == "test":
+                 files = [data_file]
+             else:
+                 data_dir = os.path.dirname(data_file)
+                 files = [os.path.join(data_dir, "{}-{}.json".format(typ, split)) for typ in ["d", "m"]]
+             data = []
+             for f in files:
+                 data_subset = json.load(open(f, encoding="utf8"))
+                 data += data_subset
+             for idx, entry in enumerate(data):
+                 for question in entry[1]:
+                     example = {
+                         "id": idx if split != "test" else int(question["id"]),
+                         "context": entry[0],
+                         "question": question["question"],
+                         "choice": question["choice"],
+                         "answer": question["answer"] if split != "test" else "",
+                     }
+                     yield example["id"], example
+
+         else:
+             with open(data_file, encoding="utf8") as f:
+                 if self.config.name in ["cmrc2018", "drcd"]:
+                     data = json.load(f)
+                     for example in data["data"]:
+                         for paragraph in example["paragraphs"]:
+                             context = paragraph["context"].strip()
+                             for qa in paragraph["qas"]:
+                                 question = qa["question"].strip()
+                                 id_ = qa["id"]
+
+                                 answer_starts = [answer["answer_start"] for answer in qa["answers"]]
+                                 answers = [answer["text"].strip() for answer in qa["answers"]]
+
+                                 yield id_, {
+                                     "context": context,
+                                     "question": question,
+                                     "id": id_,
+                                     "answers": {
+                                         "answer_start": answer_starts,
+                                         "text": answers,
+                                     },
+                                 }
+
+                 else:
+                     for n, line in enumerate(f):
+                         row = json.loads(line)
+                         example = {feat: row[col] for feat, col in six.iteritems(self.config.text_features)}
+                         example["idx"] = n if self.config.name != "diagnostics" else int(row["index"])
+                         if self.config.name == "chid":  # CHID has a separate gold label file
+                             contents = example["content"]
+                             candidates = example["candidates"]
+                             idiom_list = []
+                             if split != "test":
+                                 for content in contents:
+                                     idioms = re.findall(r"#idiom\d+#", content)
+                                     for idiom in idioms:
+                                         idiom_list.append(
+                                             {
+                                                 "candidate_id": answer_dict[idiom],
+                                                 "text": candidates[answer_dict[idiom]],
+                                             }
+                                         )
+                             example["answers"] = idiom_list
+
+                         elif self.config.label_column in row:
+                             label = row[self.config.label_column]
+                             # Notice: some labels in CMNLI are missing. We drop these data.
+                             if self.config.name == "cmnli" and label == "-":
+                                 continue
+                             # For some tasks, the label is represented as 0 and 1 in the tsv
+                             # files and needs to be cast to integer to work with the feature.
+                             if label_classes and label not in label_classes:
+                                 label = int(label) if label else None
+                             example["label"] = process_label(label)
+                         else:
+                             example["label"] = process_label(-1)
+
+                         # Filter out corrupted rows.
+                         for value in six.itervalues(example):
+                             if value is None:
+                                 break
+                         else:
+                             yield example["idx"], example
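
Note: with the script above in place, loading a CLUE configuration through the datasets library looks roughly like the sketch below (a minimal example assuming datasets>=1.0.0 is installed and the script is published under the name "clue"; the config, split, and feature names come from BUILDER_CONFIGS and _info above):

from datasets import load_dataset

# Sentence-pair classification config: sentence1/sentence2 strings, ClassLabel label, int32 idx.
afqmc = load_dataset("clue", "afqmc")
print(afqmc["train"].features)
print(afqmc["train"][0])

# Span-extraction config: "answers" is a Sequence of {"text", "answer_start"}.
cmrc = load_dataset("clue", "cmrc2018")
sample = cmrc["validation"][0]
print(sample["question"], sample["answers"]["text"])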
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"afqmc": {"description": "CLUE, A Chinese Language Understanding Evaluation Benchmark\n(https://www.cluebenchmarks.com/) is a collection of resources for training,\nevaluating, and analyzing Chinese language understanding systems.\n\n", "citation": "\n@misc{xu2020clue,\n title={CLUE: A Chinese Language Understanding Evaluation Benchmark},\n author={Liang Xu and Xuanwei Zhang and Lu Li and Hai Hu and Chenjie Cao and Weitang Liu and Junyi Li and Yudong Li and Kai Sun and Yechen Xu and Yiming Cui and Cong Yu and Qianqian Dong and Yin Tian and Dian Yu and Bo Shi and Jun Zeng and Rongzhao Wang and Weijian Xie and Yanting Li and Yina Patterson and Zuoyu Tian and Yiwen Zhang and He Zhou and Shaoweihua Liu and Qipeng Zhao and Cong Yue and Xinrui Zhang and Zhengliang Yang and Zhenzhong Lan},\n year={2020},\n eprint={2004.05986},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://dc.cloud.alipay.com/index#/topic/data?id=8", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["0", "1"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": {"features": null, "resources_checksums": {"test": {}, "train": {}, "validation": {}}}, "supervised_keys": null, "builder_name": "clue", "config_name": "afqmc", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 378726, "num_examples": 3861, "dataset_name": "clue"}, "train": {"name": "train", "num_bytes": 3396535, "num_examples": 34334, "dataset_name": "clue"}, "validation": {"name": "validation", "num_bytes": 426293, "num_examples": 4316, "dataset_name": "clue"}}, "download_checksums": {"https://storage.googleapis.com/cluebenchmark/tasks/afqmc_public.zip": {"num_bytes": 1195044, "checksum": "5a4cb1556b833010c329fa2ad2207d9e98fc94071b7e474015e9dd7c385db4dc"}}, "download_size": 1195044, "post_processing_size": 0, "dataset_size": 4201554, "size_in_bytes": 5396598}, "tnews": {"description": "CLUE, A Chinese Language Understanding Evaluation Benchmark\n(https://www.cluebenchmarks.com/) is a collection of resources for training,\nevaluating, and analyzing Chinese language understanding systems.\n\n", "citation": "\n@misc{xu2020clue,\n title={CLUE: A Chinese Language Understanding Evaluation Benchmark},\n author={Liang Xu and Xuanwei Zhang and Lu Li and Hai Hu and Chenjie Cao and Weitang Liu and Junyi Li and Yudong Li and Kai Sun and Yechen Xu and Yiming Cui and Cong Yu and Qianqian Dong and Yin Tian and Dian Yu and Bo Shi and Jun Zeng and Rongzhao Wang and Weijian Xie and Yanting Li and Yina Patterson and Zuoyu Tian and Yiwen Zhang and He Zhou and Shaoweihua Liu and Qipeng Zhao and Cong Yue and Xinrui Zhang and Zhengliang Yang and Zhenzhong Lan},\n year={2020},\n eprint={2004.05986},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://github.com/skdjfla/toutiao-text-classfication-dataset", "license": "", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 15, "names": ["100", "101", "102", "103", "104", "106", "107", "108", "109", "110", "112", "113", "114", "115", "116"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": {"features": null, 
"resources_checksums": {"test": {}, "train": {}, "validation": {}}}, "supervised_keys": null, "builder_name": "clue", "config_name": "tnews", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 797765, "num_examples": 10000, "dataset_name": "clue"}, "train": {"name": "train", "num_bytes": 4245701, "num_examples": 53360, "dataset_name": "clue"}, "validation": {"name": "validation", "num_bytes": 797926, "num_examples": 10000, "dataset_name": "clue"}}, "download_checksums": {"https://storage.googleapis.com/cluebenchmark/tasks/tnews_public.zip": {"num_bytes": 4689325, "checksum": "2469c4205606e24118c7de08199fbd55da483b65128e1d9c1f380849797f6ce0"}}, "download_size": 4689325, "post_processing_size": 0, "dataset_size": 5841392, "size_in_bytes": 10530717}, "iflytek": {"description": "CLUE, A Chinese Language Understanding Evaluation Benchmark\n(https://www.cluebenchmarks.com/) is a collection of resources for training,\nevaluating, and analyzing Chinese language understanding systems.\n\n", "citation": "\n@misc{xu2020clue,\n title={CLUE: A Chinese Language Understanding Evaluation Benchmark},\n author={Liang Xu and Xuanwei Zhang and Lu Li and Hai Hu and Chenjie Cao and Weitang Liu and Junyi Li and Yudong Li and Kai Sun and Yechen Xu and Yiming Cui and Cong Yu and Qianqian Dong and Yin Tian and Dian Yu and Bo Shi and Jun Zeng and Rongzhao Wang and Weijian Xie and Yanting Li and Yina Patterson and Zuoyu Tian and Yiwen Zhang and He Zhou and Shaoweihua Liu and Qipeng Zhao and Cong Yue and Xinrui Zhang and Zhengliang Yang and Zhenzhong Lan},\n year={2020},\n eprint={2004.05986},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "", "license": "", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 119, "names": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "40", "41", "42", "43", "44", "45", "46", "47", "48", "49", "50", "51", "52", "53", "54", "55", "56", "57", "58", "59", "60", "61", "62", "63", "64", "65", "66", "67", "68", "69", "70", "71", "72", "73", "74", "75", "76", "77", "78", "79", "80", "81", "82", "83", "84", "85", "86", "87", "88", "89", "90", "91", "92", "93", "94", "95", "96", "97", "98", "99", "100", "101", "102", "103", "104", "105", "106", "107", "108", "109", "110", "111", "112", "113", "114", "115", "116", "117", "118"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": {"features": null, "resources_checksums": {"test": {}, "train": {}, "validation": {}}}, "supervised_keys": null, "builder_name": "clue", "config_name": "iflytek", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2105688, "num_examples": 2600, "dataset_name": "clue"}, "train": {"name": "train", "num_bytes": 10028613, "num_examples": 12133, "dataset_name": "clue"}, "validation": {"name": "validation", "num_bytes": 2157123, "num_examples": 2599, "dataset_name": "clue"}}, "download_checksums": {"https://storage.googleapis.com/cluebenchmark/tasks/iflytek_public.zip": {"num_bytes": 6505938, "checksum": 
"c59b961b29f1d0bad0c5e01aa62e4a61a80e9cfb980ce89b06c000851fbb3b06"}}, "download_size": 6505938, "post_processing_size": 0, "dataset_size": 14291424, "size_in_bytes": 20797362}, "cmnli": {"description": "CLUE, A Chinese Language Understanding Evaluation Benchmark\n(https://www.cluebenchmarks.com/) is a collection of resources for training,\nevaluating, and analyzing Chinese language understanding systems.\n\n", "citation": "\n@misc{xu2020clue,\n title={CLUE: A Chinese Language Understanding Evaluation Benchmark},\n author={Liang Xu and Xuanwei Zhang and Lu Li and Hai Hu and Chenjie Cao and Weitang Liu and Junyi Li and Yudong Li and Kai Sun and Yechen Xu and Yiming Cui and Cong Yu and Qianqian Dong and Yin Tian and Dian Yu and Bo Shi and Jun Zeng and Rongzhao Wang and Weijian Xie and Yanting Li and Yina Patterson and Zuoyu Tian and Yiwen Zhang and He Zhou and Shaoweihua Liu and Qipeng Zhao and Cong Yue and Xinrui Zhang and Zhengliang Yang and Zhenzhong Lan},\n year={2020},\n eprint={2004.05986},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["neutral", "entailment", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": {"features": null, "resources_checksums": {"test": {}, "train": {}, "validation": {}}}, "supervised_keys": null, "builder_name": "clue", "config_name": "cmnli", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2386837, "num_examples": 13880, "dataset_name": "clue"}, "train": {"name": "train", "num_bytes": 67685309, "num_examples": 391783, "dataset_name": "clue"}, "validation": {"name": "validation", "num_bytes": 2051845, "num_examples": 12241, "dataset_name": "clue"}}, "download_checksums": {"https://storage.googleapis.com/cluebenchmark/tasks/cmnli_public.zip": {"num_bytes": 31404066, "checksum": "3a3f3b1d3d27134cf11e585156f07fa050bd0a0836821c02696af0dbaa14513b"}}, "download_size": 31404066, "post_processing_size": 0, "dataset_size": 72123991, "size_in_bytes": 103528057}, "cluewsc2020": {"description": "CLUE, A Chinese Language Understanding Evaluation Benchmark\n(https://www.cluebenchmarks.com/) is a collection of resources for training,\nevaluating, and analyzing Chinese language understanding systems.\n\n", "citation": "\n@misc{xu2020clue,\n title={CLUE: A Chinese Language Understanding Evaluation Benchmark},\n author={Liang Xu and Xuanwei Zhang and Lu Li and Hai Hu and Chenjie Cao and Weitang Liu and Junyi Li and Yudong Li and Kai Sun and Yechen Xu and Yiming Cui and Cong Yu and Qianqian Dong and Yin Tian and Dian Yu and Bo Shi and Jun Zeng and Rongzhao Wang and Weijian Xie and Yanting Li and Yina Patterson and Zuoyu Tian and Yiwen Zhang and He Zhou and Shaoweihua Liu and Qipeng Zhao and Cong Yue and Xinrui Zhang and Zhengliang Yang and Zhenzhong Lan},\n year={2020},\n eprint={2004.05986},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "", "license": "", "features": {"idx": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["true", "false"], "names_file": null, "id": null, "_type": "ClassLabel"}, "target": {"span1_text": 
{"dtype": "string", "id": null, "_type": "Value"}, "span2_text": {"dtype": "string", "id": null, "_type": "Value"}, "span1_index": {"dtype": "int32", "id": null, "_type": "Value"}, "span2_index": {"dtype": "int32", "id": null, "_type": "Value"}}}, "post_processed": {"features": null, "resources_checksums": {"test": {}, "train": {}, "validation": {}}}, "supervised_keys": null, "builder_name": "clue", "config_name": "cluewsc2020", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 66295, "num_examples": 290, "dataset_name": "clue"}, "train": {"name": "train", "num_bytes": 288828, "num_examples": 1244, "dataset_name": "clue"}, "validation": {"name": "validation", "num_bytes": 72682, "num_examples": 304, "dataset_name": "clue"}}, "download_checksums": {"https://storage.googleapis.com/cluebenchmark/tasks/cluewsc2020_public.zip": {"num_bytes": 88526, "checksum": "5e318e8c2c88256ea66819a24348a65d0824ade15c0f02c5a14b78d240d38afb"}}, "download_size": 88526, "post_processing_size": 0, "dataset_size": 427805, "size_in_bytes": 516331}, "csl": {"description": "CLUE, A Chinese Language Understanding Evaluation Benchmark\n(https://www.cluebenchmarks.com/) is a collection of resources for training,\nevaluating, and analyzing Chinese language understanding systems.\n\n", "citation": "\n@misc{xu2020clue,\n title={CLUE: A Chinese Language Understanding Evaluation Benchmark},\n author={Liang Xu and Xuanwei Zhang and Lu Li and Hai Hu and Chenjie Cao and Weitang Liu and Junyi Li and Yudong Li and Kai Sun and Yechen Xu and Yiming Cui and Cong Yu and Qianqian Dong and Yin Tian and Dian Yu and Bo Shi and Jun Zeng and Rongzhao Wang and Weijian Xie and Yanting Li and Yina Patterson and Zuoyu Tian and Yiwen Zhang and He Zhou and Shaoweihua Liu and Qipeng Zhao and Cong Yue and Xinrui Zhang and Zhengliang Yang and Zhenzhong Lan},\n year={2020},\n eprint={2004.05986},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://github.com/P01son6415/CSL", "license": "", "features": {"idx": {"dtype": "int32", "id": null, "_type": "Value"}, "corpus_id": {"dtype": "int32", "id": null, "_type": "Value"}, "abst": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["0", "1"], "names_file": null, "id": null, "_type": "ClassLabel"}, "keyword": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": {"features": null, "resources_checksums": {"test": {}, "train": {}, "validation": {}}}, "supervised_keys": null, "builder_name": "clue", "config_name": "csl", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2463740, "num_examples": 3000, "dataset_name": "clue"}, "train": {"name": "train", "num_bytes": 16478914, "num_examples": 20000, "dataset_name": "clue"}, "validation": {"name": "validation", "num_bytes": 2464575, "num_examples": 3000, "dataset_name": "clue"}}, "download_checksums": {"https://storage.googleapis.com/cluebenchmark/tasks/csl_public.zip": {"num_bytes": 3234594, "checksum": "795d1a2e475d59acad8236f6c5baba7a0b43d3e0508cb60f15ffbc76d5f437c4"}}, "download_size": 3234594, "post_processing_size": 0, "dataset_size": 21407229, "size_in_bytes": 24641823}, "cmrc2018": {"description": "CLUE, A Chinese Language Understanding Evaluation 
Benchmark\n(https://www.cluebenchmarks.com/) is a collection of resources for training,\nevaluating, and analyzing Chinese language understanding systems.\n\n", "citation": " @article{cmrc2018-dataset,\n title={A Span-Extraction Dataset for Chinese Machine Reading Comprehension},\n author={Cui, Yiming and Liu, Ting and Xiao, Li and Chen, Zhipeng and Ma, Wentao and Che, Wanxiang and Wang, Shijin and Hu, Guoping},\n journal={arXiv preprint arXiv:1810.07366},\n year={2018}\n}\n@misc{xu2020clue,\n title={CLUE: A Chinese Language Understanding Evaluation Benchmark},\n author={Liang Xu and Xuanwei Zhang and Lu Li and Hai Hu and Chenjie Cao and Weitang Liu and Junyi Li and Yudong Li and Kai Sun and Yechen Xu and Yiming Cui and Cong Yu and Qianqian Dong and Yin Tian and Dian Yu and Bo Shi and Jun Zeng and Rongzhao Wang and Weijian Xie and Yanting Li and Yina Patterson and Zuoyu Tian and Yiwen Zhang and He Zhou and Shaoweihua Liu and Qipeng Zhao and Cong Yue and Xinrui Zhang and Zhengliang Yang and Zhenzhong Lan},\n year={2020},\n eprint={2004.05986},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://hfl-rc.github.io/cmrc2018/", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "answer_start": {"dtype": "int32", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": {"features": null, "resources_checksums": {"test": {}, "train": {}, "validation": {}, "trial": {}}}, "supervised_keys": null, "builder_name": "clue", "config_name": "cmrc2018", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3112066, "num_examples": 2000, "dataset_name": "clue"}, "train": {"name": "train", "num_bytes": 15508110, "num_examples": 10142, "dataset_name": "clue"}, "validation": {"name": "validation", "num_bytes": 5183809, "num_examples": 3219, "dataset_name": "clue"}, "trial": {"name": "trial", "num_bytes": 1606931, "num_examples": 1002, "dataset_name": "clue"}}, "download_checksums": {"https://storage.googleapis.com/cluebenchmark/tasks/cmrc2018_public.zip": {"num_bytes": 3405146, "checksum": "6c63dc27e728ec5231aeb7d2861b4c90b6c116390582e0c44416cf3edf030b16"}}, "download_size": 3405146, "post_processing_size": 0, "dataset_size": 25410916, "size_in_bytes": 28816062}, "drcd": {"description": "CLUE, A Chinese Language Understanding Evaluation Benchmark\n(https://www.cluebenchmarks.com/) is a collection of resources for training,\nevaluating, and analyzing Chinese language understanding systems.\n\n", "citation": "\n@misc{xu2020clue,\n title={CLUE: A Chinese Language Understanding Evaluation Benchmark},\n author={Liang Xu and Xuanwei Zhang and Lu Li and Hai Hu and Chenjie Cao and Weitang Liu and Junyi Li and Yudong Li and Kai Sun and Yechen Xu and Yiming Cui and Cong Yu and Qianqian Dong and Yin Tian and Dian Yu and Bo Shi and Jun Zeng and Rongzhao Wang and Weijian Xie and Yanting Li and Yina Patterson and Zuoyu Tian and Yiwen Zhang and He Zhou and Shaoweihua Liu and Qipeng Zhao and Cong Yue and Xinrui Zhang and Zhengliang Yang and Zhenzhong Lan},\n year={2020},\n eprint={2004.05986},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://github.com/DRCKnowledgeTeam/DRCD", 
"license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "answer_start": {"dtype": "int32", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": {"features": null, "resources_checksums": {"test": {}, "train": {}, "validation": {}}}, "supervised_keys": null, "builder_name": "clue", "config_name": "drcd", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4982402, "num_examples": 3493, "dataset_name": "clue"}, "train": {"name": "train", "num_bytes": 37443458, "num_examples": 26936, "dataset_name": "clue"}, "validation": {"name": "validation", "num_bytes": 5222753, "num_examples": 3524, "dataset_name": "clue"}}, "download_checksums": {"https://storage.googleapis.com/cluebenchmark/tasks/drcd_public.zip": {"num_bytes": 7264200, "checksum": "f03a38bded37572e224b69b822794eca6218f9584afc0918bf8aa2bc77cf968d"}}, "download_size": 7264200, "post_processing_size": 0, "dataset_size": 47648613, "size_in_bytes": 54912813}, "chid": {"description": "CLUE, A Chinese Language Understanding Evaluation Benchmark\n(https://www.cluebenchmarks.com/) is a collection of resources for training,\nevaluating, and analyzing Chinese language understanding systems.\n\n", "citation": " @article{Zheng_2019,\n title={ChID: A Large-scale Chinese IDiom Dataset for Cloze Test},\n url={http://dx.doi.org/10.18653/v1/P19-1075},\n DOI={10.18653/v1/p19-1075},\n journal={Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics},\n publisher={Association for Computational Linguistics},\n author={Zheng, Chujie and Huang, Minlie and Sun, Aixin},\n year={2019}\n}\n@misc{xu2020clue,\n title={CLUE: A Chinese Language Understanding Evaluation Benchmark},\n author={Liang Xu and Xuanwei Zhang and Lu Li and Hai Hu and Chenjie Cao and Weitang Liu and Junyi Li and Yudong Li and Kai Sun and Yechen Xu and Yiming Cui and Cong Yu and Qianqian Dong and Yin Tian and Dian Yu and Bo Shi and Jun Zeng and Rongzhao Wang and Weijian Xie and Yanting Li and Yina Patterson and Zuoyu Tian and Yiwen Zhang and He Zhou and Shaoweihua Liu and Qipeng Zhao and Cong Yue and Xinrui Zhang and Zhengliang Yang and Zhenzhong Lan},\n year={2020},\n eprint={2004.05986},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://arxiv.org/abs/1906.01265", "license": "", "features": {"idx": {"dtype": "int32", "id": null, "_type": "Value"}, "candidates": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "content": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "answers": {"feature": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "candidate_id": {"dtype": "int32", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": {"features": null, "resources_checksums": {"test": {}, "train": {}, "validation": {}}}, "supervised_keys": null, "builder_name": "clue", "config_name": "chid", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 
9733464, "num_examples": 3231, "dataset_name": "clue"}, "train": {"name": "train", "num_bytes": 252478178, "num_examples": 84709, "dataset_name": "clue"}, "validation": {"name": "validation", "num_bytes": 10117789, "num_examples": 3218, "dataset_name": "clue"}}, "download_checksums": {"https://storage.googleapis.com/cluebenchmark/tasks/chid_public.zip": {"num_bytes": 133322133, "checksum": "4cdf93c0a9dfa43113284c501d8948bbc17bd6a55e3b0b4314a31909562ec19d"}}, "download_size": 133322133, "post_processing_size": 0, "dataset_size": 272329431, "size_in_bytes": 405651564}, "c3": {"description": "CLUE, A Chinese Language Understanding Evaluation Benchmark\n(https://www.cluebenchmarks.com/) is a collection of resources for training,\nevaluating, and analyzing Chinese language understanding systems.\n\n", "citation": "@article{sun2020investigating,\n author = {Kai Sun and\n Dian Yu and\n Dong Yu and\n Claire Cardie},\n title = {Investigating Prior Knowledge for Challenging Chinese Machine Reading\n Comprehension},\n journal = {Trans. Assoc. Comput. Linguistics},\n volume = {8},\n pages = {141--155},\n year = {2020},\n url = {https://transacl.org/ojs/index.php/tacl/article/view/1882}\n }\n@misc{xu2020clue,\n title={CLUE: A Chinese Language Understanding Evaluation Benchmark},\n author={Liang Xu and Xuanwei Zhang and Lu Li and Hai Hu and Chenjie Cao and Weitang Liu and Junyi Li and Yudong Li and Kai Sun and Yechen Xu and Yiming Cui and Cong Yu and Qianqian Dong and Yin Tian and Dian Yu and Bo Shi and Jun Zeng and Rongzhao Wang and Weijian Xie and Yanting Li and Yina Patterson and Zuoyu Tian and Yiwen Zhang and He Zhou and Shaoweihua Liu and Qipeng Zhao and Cong Yue and Xinrui Zhang and Zhengliang Yang and Zhenzhong Lan},\n year={2020},\n eprint={2004.05986},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://arxiv.org/abs/1904.09679", "license": "", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "context": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "choice": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": {"features": null, "resources_checksums": {"test": {}, "train": {}, "validation": {}}}, "supervised_keys": null, "builder_name": "clue", "config_name": "c3", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3025700, "num_examples": 3892, "dataset_name": "clue"}, "train": {"name": "train", "num_bytes": 9672787, "num_examples": 11869, "dataset_name": "clue"}, "validation": {"name": "validation", "num_bytes": 2990967, "num_examples": 3816, "dataset_name": "clue"}}, "download_checksums": {"https://storage.googleapis.com/cluebenchmark/tasks/c3_public.zip": {"num_bytes": 3200727, "checksum": "15e98a52512116efc2382c82496069ae75b7944cbd144230ef0f7f8150a1de7c"}}, "download_size": 3200727, "post_processing_size": 0, "dataset_size": 15689454, "size_in_bytes": 18890181}, "diagnostics": {"description": "CLUE, A Chinese Language Understanding Evaluation Benchmark\n(https://www.cluebenchmarks.com/) is a collection of resources for training,\nevaluating, and analyzing Chinese language understanding systems.\n\n", "citation": "\n@misc{xu2020clue,\n title={CLUE: A Chinese 
Language Understanding Evaluation Benchmark},\n author={Liang Xu and Xuanwei Zhang and Lu Li and Hai Hu and Chenjie Cao and Weitang Liu and Junyi Li and Yudong Li and Kai Sun and Yechen Xu and Yiming Cui and Cong Yu and Qianqian Dong and Yin Tian and Dian Yu and Bo Shi and Jun Zeng and Rongzhao Wang and Weijian Xie and Yanting Li and Yina Patterson and Zuoyu Tian and Yiwen Zhang and He Zhou and Shaoweihua Liu and Qipeng Zhao and Cong Yue and Xinrui Zhang and Zhengliang Yang and Zhenzhong Lan},\n year={2020},\n eprint={2004.05986},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["neutral", "entailment", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": {"features": null, "resources_checksums": {"test": {}}}, "supervised_keys": null, "builder_name": "clue", "config_name": "diagnostics", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 42400, "num_examples": 514, "dataset_name": "clue"}}, "download_checksums": {"https://storage.googleapis.com/cluebenchmark/tasks/clue_diagnostics_public.zip": {"num_bytes": 12062, "checksum": "56b52e70c195686557a966c6064c9bdc4dece1de8c89551f89ad046637e9a7c4"}}, "download_size": 12062, "post_processing_size": 0, "dataset_size": 42400, "size_in_bytes": 54462}}
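
Note: dataset_infos.json records, per configuration, the feature schema, split sizes, and download checksums that the datasets library uses to verify the prepared data. A small inspection sketch (file name and keys as in the JSON above; the loop itself is illustrative):

import json

with open("dataset_infos.json", encoding="utf-8") as f:
    infos = json.load(f)

for config_name, info in infos.items():
    split_sizes = {name: split["num_examples"] for name, split in info["splits"].items()}
    print(config_name, split_sizes, "download_size:", info["download_size"])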
dummy/afqmc/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb72e53cc087ee9cc1eac6ebbeb184f3310d0de88fe65000c04fe8bc45979663
+ size 2347
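
Note: the dummy_data.zip entries in this commit are stored as Git LFS pointers rather than raw bytes; each pointer is a short key-value text file carrying the spec version, the object's sha256 oid, and its size. A minimal reader sketch (the read_lfs_pointer helper is illustrative; it only parses the pointer and does not fetch the underlying object):

def read_lfs_pointer(path):
    """Parse a Git LFS pointer file into a dict of its key/value lines."""
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

pointer = read_lfs_pointer("dummy/afqmc/1.0.0/dummy_data.zip")
print(pointer["oid"], pointer["size"])  # e.g. sha256:bb72e5... 2347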
dummy/c3/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7eb325b417f8ee772f68a85e4785a9245c50f7332589a30e1ddf1e0f5e7eb8d5
+ size 5546
dummy/chid/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5dd8d263daf979bb8293347f9f5f6bde6683985a7bd5c437a62968b550da8ccc
+ size 8874
dummy/cluewsc2020/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61aa66f8940026a5442ef1ddfe1aa24c789cdb8c2806923063a749d72d5ecd66
+ size 2769
dummy/cmnli/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:88df6d24ac7c4838c4e004a2ee522afce891710b6e9331a0bc2108ec5e5560fa
+ size 3749
dummy/cmrc2018/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f7cd412604897e829ec6ba32fe166341152889fa286de140c1993932201abd3
+ size 7803
dummy/csl/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f56b3a79e1982fb61e2655c1957cca0a3b9c627ae1d3237ffc1063f5b5a77a72
+ size 3983
dummy/diagnostics/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7cc44f25b1d1444259982a6bd1188ebf071f1818b818dc19d4dde4ecac8d8638
+ size 869
dummy/drcd/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e89fb0e042e2e407597a13057a0ffbdc431264893f30ccf267ae628fdd2faf82
+ size 6811
dummy/iflytek/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a649bb3c1f4abbd1b950b484f262f4a1ef269bf6b27f253b192fadebc2b61d7d
+ size 4236
dummy/tnews/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:191ff9ce9b80b43d2df5a6d4e221b2d6a072a463d4cce1fd167eb9c8653bd172
+ size 3463