abhik1505040 committed
Commit 73b5e30
1 Parent(s): 6450b91

Create crosssum.py

Files changed (1)
  1. crosssum.py +153 -0
crosssum.py ADDED
@@ -0,0 +1,153 @@
+ """CrossSum cross-lingual abstractive summarization dataset."""
+
+
+ import json
+ import os
+
+ import datasets
+
+
+ _CITATION = """\
+ @article{hasan2021crosssum,
+   author = {Tahmid Hasan and Abhik Bhattacharjee and Wasi Uddin Ahmad and Yuan-Fang Li and Yong-bin Kang and Rifat Shahriyar},
+   title = {CrossSum: Beyond English-Centric Cross-Lingual Abstractive Text Summarization for 1500+ Language Pairs},
+   journal = {CoRR},
+   volume = {abs/2112.08804},
+   year = {2021},
+   url = {https://arxiv.org/abs/2112.08804},
+   eprinttype = {arXiv},
+   eprint = {2112.08804}
+ }
+ """
+
+
+ _DESCRIPTION = """\
+ We present CrossSum, a large-scale dataset
+ comprising 1.70 million cross-lingual article-summary samples in 1500+ language pairs
+ constituting 45 languages. We use the multilingual XL-Sum dataset and align identical
+ articles written in different languages via cross-lingual retrieval using a language-agnostic
+ representation model.
+ """
+
+ _HOMEPAGE = "https://github.com/csebuetnlp/CrossSum"
+
+ _LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0)"
+
+ _URL = "https://huggingface.co/datasets/csebuetnlp/CrossSum/resolve/main/data/{}-{}_CrossSum.tar.bz2"
+
+ _LANGUAGES = [
+     "oromo",
+     "french",
+     "amharic",
+     "arabic",
+     "azerbaijani",
+     "bengali",
+     "burmese",
+     "chinese_simplified",
+     "chinese_traditional",
+     "welsh",
+     "english",
+     "kirundi",
+     "gujarati",
+     "hausa",
+     "hindi",
+     "igbo",
+     "indonesian",
+     "japanese",
+     "korean",
+     "kyrgyz",
+     "marathi",
+     "spanish",
+     "scottish_gaelic",
+     "nepali",
+     "pashto",
+     "persian",
+     "pidgin",
+     "portuguese",
+     "punjabi",
+     "russian",
+     "serbian_cyrillic",
+     "serbian_latin",
+     "sinhala",
+     "somali",
+     "swahili",
+     "tamil",
+     "telugu",
+     "thai",
+     "tigrinya",
+     "turkish",
+     "ukrainian",
+     "urdu",
+     "uzbek",
+     "vietnamese",
+     "yoruba",
+ ]
+
+
+ class Crosssum(datasets.GeneratorBasedBuilder):
+     """Builder for CrossSum, with one config per (source, target) language pair."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIGS = [
+         # One config for every ordered pair drawn from _LANGUAGES,
+         # named "{source}-{target}", e.g. "english-bengali".
+         datasets.BuilderConfig(
+             name="{}-{}".format(src_lang, tgt_lang),
+             version=datasets.Version("1.0.0"),
+         )
+         for src_lang in _LANGUAGES
+         for tgt_lang in _LANGUAGES
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "source_url": datasets.Value("string"),
+                     "target_url": datasets.Value("string"),
+                     "summary": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+             license=_LICENSE,
+             version=self.VERSION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         lang = str(self.config.name)
+         # The config name has the form "src-tgt"; its two halves fill the
+         # two placeholders in _URL ("{}-{}_CrossSum.tar.bz2").
+         url = _URL.format(*lang.split("-"))
+
+         data_dir = dl_manager.download_and_extract(url)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, lang + "_train.jsonl"),
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, lang + "_test.jsonl"),
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, lang + "_val.jsonl"),
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath):
+         """Yields examples as (key, example) tuples."""
+         with open(filepath, encoding="utf-8") as f:
+             # Each line of the JSONL file holds one aligned article-summary pair.
+             for idx_, row in enumerate(f):
+                 data = json.loads(row)
+                 yield idx_, {
+                     "source_url": data["source_url"],
+                     "target_url": data["target_url"],
+                     "summary": data["summary"],
+                     "text": data["text"],
+                 }
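
Once this script is in place in the csebuetnlp/CrossSum dataset repository, any of the 45 × 45 = 2025 generated pair configs can be loaded by name. A minimal usage sketch ("english-bengali" is just an illustrative pair; any two names from _LANGUAGES form a config, assuming the corresponding archive exists on the Hub):

import datasets

# Config names follow the "{source}-{target}" pattern from BUILDER_CONFIGS.
ds = datasets.load_dataset("csebuetnlp/CrossSum", "english-bengali")

# Splits mirror _split_generators: train, validation, and test.
print(ds["train"][0]["summary"])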
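
For reference, _generate_examples assumes each line of the extracted *_train.jsonl, *_val.jsonl, and *_test.jsonl files is a JSON object carrying the four keys it reads. A hypothetical record, with all values invented placeholders rather than real data:

# Hypothetical JSONL record; every value below is a made-up placeholder.
record = {
    "source_url": "https://example.com/article-in-source-language",
    "target_url": "https://example.com/aligned-article-in-target-language",
    "summary": "Summary written in the target language.",
    "text": "Full article text written in the source language.",
}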