XiangBo committed on
Commit
89dad06
โ€ข
1 Parent(s): 4ac2048

Upload 2 files

Browse files
Files changed (2) hide show
  1. CMB-datasets.zip +2 -2
  2. CMB.py +269 -0
CMB-datasets.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b68664817a66f9493f7f8a794ee7888c316cfe38df5943275479c9a073dc3cdc
3
- size 36506484
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4ea94b865030f847fc56b5e81f240b9ed7135de904af9b66bc867b85813b2f58
3
+ size 36507969
CMB.py ADDED
@@ -0,0 +1,269 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """The General Language Understanding Evaluation (GLUE) benchmark."""
18
+
19
+ import csv
20
+ import os
21
+ import sys
22
+ import json
23
+ import io
24
+ import textwrap
25
+
26
+ import numpy as np
27
+
28
+ import datasets
29
+
30
+ _CMB_CITATION = """\
31
+ coming soon~
32
+ """
33
+
34
+ _CMB_DESCRIPTION = """\
35
+
36
+ coming soon~
37
+
38
+ """
39
+
40
+ _DATASETS_FILE = "CMB-datasets.zip"
41
+
42
+
43
+ class CMBConfig(datasets.BuilderConfig):
44
+ """BuilderConfig for GLUE."""
45
+
46
+ def __init__(
47
+ self,
48
+ features,
49
+ data_url,
50
+ data_dir,
51
+ citation,
52
+ url,
53
+ **kwargs,
54
+ ):
55
+
56
+
57
+ super(CMBConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
58
+ self.features = features
59
+ self.data_url = data_url
60
+ self.data_dir = data_dir
61
+ self.citation = citation
62
+ self.url = url
63
+
64
+
65
+ class CMB(datasets.GeneratorBasedBuilder):
66
+ """The General Language Understanding Evaluation (GLUE) benchmark."""
67
+
68
+ BUILDER_CONFIGS = [
69
+ CMBConfig(
70
+ name="main",
71
+ description=textwrap.dedent(
72
+ """\
73
+ ไธป่ฆๆ•ฐๆฎ้›†๏ผŒๅŒ…ๅซ train val test ไธ‰ไธช็ป„ๆˆ้ƒจๅˆ†."""
74
+ ),
75
+ features=datasets.Features(
76
+ {
77
+ "id": datasets.Value("string"),
78
+ "exam_type": datasets.Value("string"),
79
+ "exam_class": datasets.Value("string"),
80
+ "chapter": datasets.Value("string"),
81
+ "exam_subject": datasets.Value("string"),
82
+ "exercise": datasets.Value("string"),
83
+ "question": datasets.Value("string"),
84
+ "question_type": datasets.Value("string"),
85
+ "option": datasets.Value("string"),
86
+ "answer": datasets.Value("string"),
87
+ "explanation": datasets.Value("string")
88
+
89
+ }
90
+ ),
91
+ data_url=_DATASETS_FILE,
92
+ data_dir="CMB-main",
93
+ citation=textwrap.dedent(
94
+ """\
95
+
96
+ }"""
97
+ ),
98
+ url="https://github.com/FreedomIntelligence/CMB",
99
+ ),
100
+ CMBConfig(
101
+ name="paper-exampaper",
102
+ description=textwrap.dedent(
103
+ """\
104
+ ๅŽ†ๅฒ็œŸ้ข˜
105
+ ."""
106
+ ),
107
+ features=datasets.Features(
108
+ {
109
+ "id": datasets.Value("string"),
110
+ "source": datasets.Value("string"),
111
+ "exam_type": datasets.Value("string"),
112
+ "exam_class": datasets.Value("string"),
113
+ "exam_subject": datasets.Value("string"),
114
+ "question": datasets.Value("string"),
115
+ "question_type": datasets.Value("string"),
116
+ "option": datasets.Value("string"),
117
+ "answer": datasets.Value("string")
118
+
119
+ }
120
+ ),
121
+ data_url=_DATASETS_FILE,
122
+ data_dir="CMB-test-exampaper",
123
+ citation=textwrap.dedent(
124
+ """\
125
+
126
+ }"""
127
+ ),
128
+ url="https://github.com/FreedomIntelligence/CMB",
129
+ ),
130
+ CMBConfig(
131
+ name="qa",
132
+ description=textwrap.dedent(
133
+ """\
134
+ QA ๆ ผๅผ็š„่€ƒ้ข˜
135
+ """
136
+ ),
137
+ features=datasets.Features(
138
+ {
139
+ "id": datasets.Value("string"),
140
+ "title": datasets.Value("string"),
141
+ "description": datasets.Value("string"),
142
+ "QA_pairs": datasets.Value("string")
143
+
144
+ }
145
+ ),
146
+
147
+ data_url=_DATASETS_FILE,
148
+ data_dir="CMB-test-qa",
149
+ citation=textwrap.dedent(
150
+ """\
151
+
152
+ }"""
153
+ ),
154
+ url="https://github.com/FreedomIntelligence/CMB",
155
+ ),
156
+
157
+ ]
158
+
159
+ def _info(self):
160
+
161
+ return datasets.DatasetInfo(
162
+ description=_CMB_DESCRIPTION,
163
+ features=self.config.features,
164
+ homepage=self.config.url,
165
+ citation=self.config.citation + "\n" + _CMB_CITATION,
166
+ )
167
+
168
+ def _split_generators(self, dl_manager):
169
+ if self.config.name == "main":
170
+ data_file = dl_manager.extract(self.config.data_url)
171
+ main_data_dir = os.path.join(data_file, self.config.data_dir)
172
+
173
+ return [
174
+ datasets.SplitGenerator(
175
+ name=datasets.Split.TRAIN,
176
+ gen_kwargs={
177
+ "data_file": os.path.join(main_data_dir, 'CMB-train', 'CMB-train-merge.json'),
178
+ "split": "train",
179
+ },
180
+ )
181
+ ,
182
+ datasets.SplitGenerator(
183
+ name=datasets.Split.VALIDATION,
184
+ gen_kwargs={
185
+ "data_file": os.path.join(main_data_dir, 'CMB-val', 'CMB-val-merge.json'),
186
+ "split": "val",
187
+ },
188
+ )
189
+ ,
190
+ datasets.SplitGenerator(
191
+ name=datasets.Split.TEST,
192
+ gen_kwargs={
193
+ "data_file": os.path.join(main_data_dir, 'CMB-test', 'CMB-test-choice-question-merge.json'),
194
+ "split": "test",
195
+ },
196
+ )
197
+ ]
198
+
199
+ if self.config.name == "paper-exampaper":
200
+ data_file = dl_manager.extract(self.config.data_url)
201
+ main_data_dir = os.path.join(data_file, self.config.data_dir)
202
+ return [
203
+
204
+
205
+ datasets.SplitGenerator(
206
+ name=datasets.Split.TEST,
207
+ gen_kwargs={
208
+ "data_file": os.path.join(main_data_dir, 'CMB-test-zhenti-merge.json'),
209
+ "split": "test",
210
+ },
211
+ )
212
+ ]
213
+
214
+ if self.config.name == "qa":
215
+ data_file = dl_manager.extract(self.config.data_url)
216
+ main_data_dir = os.path.join(data_file, self.config.data_dir)
217
+ return [
218
+
219
+
220
+ datasets.SplitGenerator(
221
+ name=datasets.Split.TEST,
222
+ gen_kwargs={
223
+ "data_file": os.path.join(main_data_dir, 'CMB-test-qa.json'),
224
+ "split": "test",
225
+ },
226
+ )
227
+ ]
228
+
229
+
230
+ def _generate_examples(self, data_file, split, mrpc_files=None):
231
+
232
+ if self.config.name == 'main':
233
+
234
+ examples = json.loads(io.open(data_file, 'r').read())
235
+
236
+ for idx in range(len(examples)):
237
+ vals = examples[idx]
238
+ vals['explanation'] = vals.get('explanation','')
239
+ vals['exercise'] = vals.get('exercise','')
240
+ vals['chapter'] = vals.get('chapter','')
241
+ vals['answer'] = vals.get('answer','')
242
+ vals['id'] = vals.get('id',idx)
243
+ yield idx, vals
244
+
245
+ if self.config.name == 'paper-exampaper':
246
+ examples = json.loads(io.open(data_file, 'r').read())
247
+ for idx in range(len(examples)):
248
+ vals = examples[idx]
249
+ vals['answer'] = vals.get('answer','')
250
+ vals['source'] = vals.get('source','')
251
+ vals['id'] = vals.get('id',idx)
252
+ yield idx, vals
253
+
254
+ if self.config.name == 'qa':
255
+ examples = json.loads(io.open(data_file, 'r').read())
256
+ for idx in range(len(examples)):
257
+ vals = examples[idx]
258
+ vals['id'] = vals.get('id',idx)
259
+ yield idx, vals
260
+
261
+
262
+
263
+ if __name__ == '__main__':
264
+ from datasets import load_dataset
265
+
266
+ dataset = load_dataset('CMB.py', 'main')
267
+ # dataset = load_dataset('CMB.py', 'qa')
268
+
269
+ print()