Upload ai_hub_summarization.py
Browse files · ai_hub_summarization.py (+10 −2)
ai_hub_summarization.py
CHANGED
@@ -9,7 +9,8 @@ _LICENSE = "CC-BY-SA-4.0"
|
|
9 |
# _URL = "https://github.com/boostcampaitech2/data-annotation-nlp-level3-nlp-14"
|
10 |
_DATA_URLS = {
|
11 |
"train": "https://huggingface.co/datasets/raki-1203/ai_hub_summarization/resolve/main/train_dict.json",
|
12 |
-
"valid": "https://huggingface.co/datasets/raki-1203/ai_hub_summarization/resolve/main/valid_dict.json",
|
|
|
13 |
}
|
14 |
|
15 |
_VERSION = "0.0.0"
|
@@ -62,10 +63,17 @@ class AiHubSummarization(datasets.GeneratorBasedBuilder):
|
|
62 |
datasets.SplitGenerator(
|
63 |
name=datasets.Split.VALIDATION,
|
64 |
gen_kwargs={
|
65 |
-
"data_file": data_file["valid"],
|
66 |
"split": "valid",
|
67 |
},
|
68 |
),
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
69 |
]
|
70 |
|
71 |
def _generate_examples(self, data_file: str, split: str):
|
|
|
9 |
# _URL = "https://github.com/boostcampaitech2/data-annotation-nlp-level3-nlp-14"
|
10 |
_DATA_URLS = {
|
11 |
"train": "https://huggingface.co/datasets/raki-1203/ai_hub_summarization/resolve/main/train_dict.json",
|
12 |
+
"valid": "https://huggingface.co/datasets/raki-1203/ai_hub_summarization/resolve/main/valid_dict.json",
|
13 |
+
"test": "https://huggingface.co/datasets/raki-1203/ai_hub_summarization/resolve/main/test_dict.json",
|
14 |
}
|
15 |
|
16 |
_VERSION = "0.0.0"
|
|
|
63 |
datasets.SplitGenerator(
|
64 |
name=datasets.Split.VALIDATION,
|
65 |
gen_kwargs={
|
66 |
+
"data_file": data_file["valid"],
|
67 |
"split": "valid",
|
68 |
},
|
69 |
),
|
70 |
+
datasets.SplitGenerator(
|
71 |
+
name=datasets.Split.TEST,
|
72 |
+
gen_kwargs={
|
73 |
+
"data_file": data_file["test"],
|
74 |
+
"split": "test",
|
75 |
+
},
|
76 |
+
),
|
77 |
]
|
78 |
|
79 |
def _generate_examples(self, data_file: str, split: str):
|