Commit: "Upload essay_kaggle.py" — adds new file essay_kaggle.py (+62 lines, file ADDED, hunk @@ -0,0 +1,62 @@).
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
|
3 |
+
import datasets
|
4 |
+
|
5 |
+
|
6 |
+
|
7 |
+
# Relative paths (within the dataset repo) of the gzipped JSON-lines files
# for each cross-validation configuration. Keys are builder-config names
# ("seed_<s>_fold_<f>"); each maps the three split names used by
# `_split_generators` to its data file. File stems follow the pattern
# essay_<seed>_<fold>_<split>; note the validation files use the
# abbreviated "vali" suffix.
_URLS = {
    "seed_0_fold_0": {
        "train": "data/essay_0_0_train.jsonl.gz",
        "validation": "data/essay_0_0_vali.jsonl.gz",
        "test": "data/essay_0_0_test.jsonl.gz",
    },
    "seed_0_fold_1": {
        "train": "data/essay_0_1_train.jsonl.gz",
        "validation": "data/essay_0_1_vali.jsonl.gz",
        "test": "data/essay_0_1_test.jsonl.gz",
    },
}
|
19 |
+
|
20 |
+
|
21 |
+
class Essay(datasets.GeneratorBasedBuilder):
    """Essay-scoring dataset builder: gzipped JSON-lines essays with a 1-6 score.

    Each builder config selects one (seed, fold) cross-validation partition
    from `_URLS`. Every example carries an `essay_id`, the essay `full_text`,
    and a holistic `score` label (presumably 1-6 integers in the raw data —
    TODO confirm against the data files).
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="seed_0_fold_0"),
        datasets.BuilderConfig(name="seed_0_fold_1"),
    ]
    DEFAULT_CONFIG_NAME = "seed_0_fold_0"

    def _info(self):
        """Return the DatasetInfo declaring the feature schema (same for all configs)."""
        # Fixes vs. the original: `Value`/`ClassLabel` were bare names
        # (NameError — they live in the `datasets` module); the
        # `datasets.DatasetInfo(` call was never closed (syntax error); and
        # `ClassLabel` was given integer names plus an unsupported list-valued
        # `id=` kwarg. ClassLabel names must be strings: label index i
        # corresponds to score i + 1.
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "essay_id": datasets.Value("string"),
                    "full_text": datasets.Value("string"),
                    "score": datasets.ClassLabel(names=["1", "2", "3", "4", "5", "6"]),
                }
            )
        )

    def _split_generators(self, dl_manager):
        """Download this config's files and declare train/validation/test splits.

        Renamed from `_seed_0_fold_0_generators`: `GeneratorBasedBuilder` only
        ever calls `_split_generators`, so under the old name the required
        override was never found and loading failed. The original
        if/else on `self.config.name` had byte-identical branches (the config
        is already selected via `_URLS[self.config.name]`), so it was removed.
        """
        paths = dl_manager.download_and_extract(_URLS[self.config.name])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": paths["train"]}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": paths["validation"]}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": paths["test"]}
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield (index, example) pairs, one per JSON line of *filepath*.

        `filepath` is the already-decompressed local path produced by
        `download_and_extract`; the line index serves as the example key.
        """
        with open(filepath, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                example = json.loads(line)
                yield idx, example