Heerak committed on
Commit
9c9fada
1 Parent(s): 9e2af8d

Upload ai_hub_summarization.py

Files changed (1)
  1. ai_hub_summarization.py +78 -0
ai_hub_summarization.py ADDED
@@ -0,0 +1,78 @@
+ import csv
+ import datasets
+
+ _CITATION = """"""
+ _DESCRIPTION = """"""
+ _LICENSE = "CC-BY-SA-4.0"
+ # _URL = "https://github.com/boostcampaitech2/data-annotation-nlp-level3-nlp-14"
+ _DATA_URLS = {
+     "train": "https://huggingface.co/datasets/raki-1203/ai_hub_summarization/resolve/main/train_abstract_summary_v1.0.json",
+     # NOTE: "dev" currently points to the same file as "train".
+     "dev": "https://huggingface.co/datasets/raki-1203/ai_hub_summarization/resolve/main/train_abstract_summary_v1.0.json",
+ }
+
+ _VERSION = "0.0.0"
+
+
+ class AiHubSummarizationConfig(datasets.BuilderConfig):
+     """BuilderConfig carrying the per-split download URLs."""
+
+     def __init__(self, data_url, **kwargs):
+         super().__init__(version=datasets.Version(_VERSION), **kwargs)
+         self.data_url = data_url
+
+
+ class AiHubSummarization(datasets.GeneratorBasedBuilder):
+     # Default to the single config defined in BUILDER_CONFIGS.
+     DEFAULT_CONFIG_NAME = "ai_hub_summarization"
+     BUILDER_CONFIGS = [
+         AiHubSummarizationConfig(
+             name="ai_hub_summarization",
+             data_url=_DATA_URLS,
+             description=_DESCRIPTION,
+         )
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "passage": datasets.Value("string"),
+                     "abstract_summary": datasets.Value("string"),
+                 }
+             ),
+             license=_LICENSE,
+             citation=_CITATION,
+             supervised_keys=None,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators for the train and validation splits."""
+         data_file = dl_manager.download_and_extract(self.config.data_url)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "data_file": data_file["train"],
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "data_file": data_file["dev"],
+                     "split": "valid",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, data_file: str, split: str):
+         """Yields (key, example) pairs, one per data row."""
+         with open(data_file, newline="", encoding="utf-8") as csvfile:
+             reader = csv.reader(csvfile, delimiter=",")
+             next(reader)  # skip the header row
+             for idx, row in enumerate(reader):
+                 yield idx, {
+                     "passage": row[0],
+                     "abstract_summary": row[1],
+                 }