nglaura committed on
Commit
1318418
1 Parent(s): 5cf9a51

Create scielo-summarization.py

Browse files
Files changed (1) hide show
  1. scielo-summarization.py +143 -0
scielo-summarization.py ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ import datasets
4
+ from tqdm import tqdm
5
+
6
+
7
+ _ARTICLE_ID = "article_id"
8
+ _ARTICLE_WORDS = "article_words"
9
+ _ARTICLE_BBOXES = "article_bboxes"
10
+ _ARTICLE_NORM_BBOXES = "article_norm_bboxes"
11
+ _ABSTRACT = "abstract"
12
+ _ARTICLE_PDF_URL = "article_pdf_url"
13
+
14
+ def normalize_bbox(bbox, size):
15
+ return [
16
+ int(1000 * bbox[0] / size[0]),
17
+ int(1000 * bbox[1] / size[1]),
18
+ int(1000 * bbox[2] / size[0]),
19
+ int(1000 * bbox[3] / size[1]),
20
+ ]
21
+
22
+
23
+ class SciELOSummarizationConfig(datasets.BuilderConfig):
24
+ """BuilderConfig for SciELOSummarization."""
25
+ def __init__(self, lang, **kwargs):
26
+ """BuilderConfig for ArxivSummarization.
27
+ Args:
28
+ lang: language (`es` for Spanish, `pt` for Portuguese)
29
+ **kwargs: keyword arguments forwarded to super.
30
+ """
31
+ super(SciELOSummarizationConfig, self).__init__(**kwargs)
32
+ self.lang = lang
33
+
34
+
35
+ class SciELOSummarizationDataset(datasets.GeneratorBasedBuilder):
36
+ """SciELOSummarization Dataset."""
37
+
38
+ BUILDER_CONFIGS = [
39
+ SciELOSummarizationConfig(
40
+ name="scielo_es",
41
+ version=datasets.Version("1.0.0"),
42
+ description="SciELO dataset for summarization (Spanish)",
43
+ lang="es",
44
+ ),
45
+ SciELOSummarizationConfig(
46
+ name="scielo_pt",
47
+ version=datasets.Version("1.0.0"),
48
+ description="SciELO dataset for summarization (Portuguese)",
49
+ lang="pt",
50
+ ),
51
+ ]
52
+
53
+
54
+ def _info(self):
55
+ # Should return a datasets.DatasetInfo object
56
+ return datasets.DatasetInfo(
57
+ features=datasets.Features(
58
+ {
59
+ _ARTICLE_ID: datasets.Value("string"),
60
+ _ARTICLE_WORDS: datasets.Sequence(datasets.Value("string")),
61
+ _ARTICLE_BBOXES: datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
62
+ _ARTICLE_NORM_BBOXES: datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
63
+ _ABSTRACT: datasets.Value("string"),
64
+ _ARTICLE_PDF_URL: datasets.Value("string"),
65
+ }
66
+ ),
67
+ supervised_keys=None,
68
+ )
69
+
70
+ def _split_generators(self, dl_manager):
71
+
72
+ train_archive = self.config.lang + "_train.zip"
73
+ val_archive = self.config.lang + "_val.zip"
74
+ test_archive = self.config.lang + "_test.zip"
75
+ train_abstracts = self.config.lang + "_train.txt"
76
+ val_abstracts = self.config.lang + "_validation.txt"
77
+ test_abstracts = self.config.lang + "_test.txt"
78
+
79
+ train_dir = os.path.join(dl_manager.download_and_extract(train_archive), self.config.lang + "train")
80
+ val_dir = os.path.join(dl_manager.download_and_extract(val_archive), self.config.lang + "val")
81
+ test_dir = os.path.join(dl_manager.download_and_extract(test_archive), self.config.lang + "test")
82
+
83
+ train_abstracts = dl_manager.download_and_extract(train_abstracts)
84
+ val_abstracts = dl_manager.download_and_extract(val_abstracts)
85
+ test_abstracts = dl_manager.download_and_extract(test_abstracts)
86
+
87
+ return [
88
+ datasets.SplitGenerator(
89
+ name=datasets.Split.TRAIN,
90
+ gen_kwargs={"data_path": train_dir, "abstract_path": train_abstracts}
91
+ ),
92
+ datasets.SplitGenerator(
93
+ name=datasets.Split.VALIDATION,
94
+ gen_kwargs={"data_path": val_dir, "abstract_path": val_abstracts}
95
+ ),
96
+ datasets.SplitGenerator(
97
+ name=datasets.Split.TEST,
98
+ gen_kwargs={"data_path": test_dir, "abstract_path": test_abstracts}
99
+ ),
100
+ ]
101
+
102
+ def _generate_examples(self, data_path, abstract_path):
103
+ """Generate SciELOSummarization examples."""
104
+ filenames = sorted(os.listdir(data_path))
105
+
106
+ guid = 0
107
+ with open(abstract_path, 'r') as abstract_file:
108
+ for line in tqdm(abstract_file, total=len(filenames), desc=f"Reading files in {data_path}"):
109
+ guid += 1
110
+ item = json.loads(line)
111
+ fname = item["id"] + ".txt"
112
+ filepath = os.path.join(data_path, fname)
113
+
114
+ words = []
115
+ bboxes = []
116
+ norm_bboxes = []
117
+
118
+ with open(filepath, encoding="utf-8") as f:
119
+ for line in f:
120
+ splits = line.split("\t")
121
+ word = splits[0]
122
+ bbox = splits[1:5]
123
+ bbox = [int(b) for b in bbox]
124
+ page_width, page_height = int(splits[5]), int(splits[6])
125
+ norm_bbox = normalize_bbox(bbox, (page_width, page_height))
126
+
127
+ words.append(word)
128
+ bboxes.append(bbox)
129
+ norm_bboxes.append(norm_bbox)
130
+
131
+ assert len(words) == len(bboxes)
132
+ assert len(bboxes) == len(norm_bboxes)
133
+
134
+ yield guid, {
135
+ _ARTICLE_ID: item["id"],
136
+ _ARTICLE_WORDS: words,
137
+ _ARTICLE_BBOXES: bboxes,
138
+ _ARTICLE_NORM_BBOXES: norm_bboxes,
139
+ _ABSTRACT: item["abstract"],
140
+ _ARTICLE_PDF_URL: item["pdf_url"],
141
+ }
142
+
143
+