keshan committed on
Commit
573b8b7
1 Parent(s): 32a6213

Create wit-dataset.py

Files changed (1)
  1. wit-dataset.py +212 -0
wit-dataset.py ADDED
@@ -0,0 +1,212 @@
+ import glob
+ import os
+
+ import datasets
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _DESCRIPTION = """\
+ The Wikipedia-based Image Text (WIT) dataset is a large multimodal, multilingual dataset.
+ WIT is composed of a curated set of 37.6 million entity-rich image-text examples with 11.5 million unique images across 108 Wikipedia languages.
+ Its size enables WIT to be used as a pretraining dataset for multimodal machine learning models.
+ """
+
+ _CITATION = """
+ @article{srinivasan2021wit,
+   title={WIT: Wikipedia-based Image Text Dataset for Multimodal Multilingual Machine Learning},
+   author={Srinivasan, Krishna and Raman, Karthik and Chen, Jiecao and Bendersky, Michael and Najork, Marc},
+   journal={arXiv preprint arXiv:2103.01913},
+   year={2021}
+ }
+ """
+
+ _URL = "https://github.com/google-research-datasets/wit"
+ _DATA_URL = "https://huggingface.co/datasets/keshan/wit-dataset/resolve/7e65a989e0d2e48c33b86309c37e9eadfc063b9f/data/{language}.tar.gz"
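+ # e.g. _DATA_URL.format(language="si") resolves to ".../data/si.tar.gz" under the
+ # pinned revision above.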
+
+ _LANGUAGES = [
+     'ms',
+     'eu',
+     'si',
+     'Prakrit',
+     'ko',
+     'nv',
+     'id',
+     'tg',
+     'mn',
+     'fa',
+     'bg',
+     'ia',
+     'ca',
+     'jv',
+     'vi',
+     'ja',
+     'bs',
+     'te',
+     'war',
+     'hy',
+     'sv',
+     'az',
+     'lah',
+     'ht',
+     'sl',
+     'pt',
+     'an',
+     'br',
+     'nn',
+     'ceb',
+     'ce',
+     'qu',
+     'gl',
+     'fy',
+     'vec',
+     'zh',
+     'iw',
+     'vo',
+     'xmf',
+     'nds',
+     'bar',
+     'ba',
+     'sr-Latn',
+     'hsb',
+     'yue',
+     'arz',
+     'es',
+     'bn',
+     'de',
+     'mk',
+     'pa',
+     'zh-TW',
+     'io',
+     'lb',
+     'azb',
+     'ga',
+     'cs',
+     'fi',
+     'cv',
+     'sr',
+     'lv',
+     'my',
+     'mg',
+     'hu',
+     'it',
+     'kk',
+     'be',
+     'sq',
+     'ru',
+     'ar',
+     'cy',
+     'hr',
+     'be-tarask',
+     'is',
+     'tt',
+     'mr',
+     'ro',
+     'en',
+     'fil',
+     'uz',
+     'af',
+     'et',
+     'fr',
+     'no',
+     'ckb',
+     'nan',
+     'sw',
+     'la',
+     'lmo',
+     'th',
+     'ta',
+     'ast',
+     'eo',
+     'tr',
+     'uk',
+     'ur',
+     'ne',
+     'kn',
+     'da',
+     'nl',
+     'ka',
+     'pl',
+     'el',
+     'sco',
+     'hi',
+     'sk',
+     'oc',
+     'lt',
+     'ml',
+ ]
+
+
+ class WITConfig(datasets.BuilderConfig):
+     """BuilderConfig for WIT."""
+
+     def __init__(self, *args, languages, **kwargs):
+         """BuilderConfig for WIT.
+
+         Args:
+             languages (:obj:`List[str]`): list of languages to load
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super().__init__(
+             *args,
+             name="+".join(languages),
+             **kwargs,
+         )
+         self.languages = languages
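+ # Illustrative (not one of the predefined configs built below): WITConfig(languages=["en", "fr"])
+ # produces a single config named "en+fr" that loads both languages.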
+
+
+ class WIT(datasets.GeneratorBasedBuilder):
+     """WIT: a Wikipedia-based image-text dataset that can be used for pretraining multimodal machine learning models."""
+
+     BUILDER_CONFIGS = [WITConfig(languages=[lang]) for lang in _LANGUAGES]
+     BUILDER_CONFIG_CLASS = WITConfig
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "language": datasets.Value("string"),
+                     "page_url": datasets.Value("string"),
+                     "image_url": datasets.Value("string"),
+                     "page_title": datasets.Value("string"),
+                     "section_title": datasets.Value("string"),
+                     "hierarchical_section_title": datasets.Value("string"),
+                     "caption_reference_description": datasets.Value("string"),
+                     "caption_attribution_description": datasets.Value("string"),
+                     "caption_alt_text_description": datasets.Value("string"),
+                     "mime_type": datasets.Value("string"),
+                     # int32 rather than int8: real image dimensions easily exceed 127.
+                     "original_height": datasets.Value("int32"),
+                     "original_width": datasets.Value("int32"),
+                     "is_main_image": datasets.Value("bool"),
+                     "attribution_passes_lang_id": datasets.Value("string"),
+                     "page_changed_recently": datasets.Value("string"),
+                     "context_page_description": datasets.Value("string"),
+                     "context_section_description": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_URL,
+             citation=_CITATION,
+         )
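+     # Note: the feature order above must match the column order of the TSV header;
+     # _generate_examples asserts that they are identical.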
+
+     def _split_generators(self, dl_manager):
+         # Download and extract one archive per configured language; config.name
+         # may be a "+"-joined list of codes, so iterate config.languages instead.
+         archive_paths = dl_manager.download_and_extract(
+             [_DATA_URL.format(language=language) for language in self.config.languages]
+         )
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepaths": archive_paths,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepaths):
+         data_fields = list(self._info().features.keys())
+         id_ = 0
+         for path in filepaths:
+             # For an archive, download_and_extract returns the extraction directory;
+             # each archive is assumed to contain one or more TSV files (their exact
+             # layout inside the .tar.gz is not documented in this commit).
+             if os.path.isdir(path):
+                 tsv_files = sorted(glob.glob(os.path.join(path, "**", "*.tsv"), recursive=True))
+             else:
+                 tsv_files = [path]
+             for tsv_file in tsv_files:
+                 with open(tsv_file, encoding="utf-8") as f:
+                     # The first line is the header; it must match the feature names.
+                     column_names = f.readline().strip().split("\t")
+                     assert (
+                         column_names == data_fields
+                     ), f"The file should have {data_fields} as column names, but has {column_names}"
+                     for line in f:
+                         field_values = line.rstrip("\n").split("\t")
+                         # If a row is incomplete, pad it with empty values.
+                         if len(field_values) < len(data_fields):
+                             field_values += (len(data_fields) - len(field_values)) * [""]
+                         yield id_, dict(zip(data_fields, field_values))
+                         id_ += 1
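
For reference, a minimal usage sketch, assuming this script is published on the Hub as keshan/wit-dataset and run with a datasets version that still executes Hub loading scripts ("si" is one of the config names built from _LANGUAGES):

from datasets import load_dataset

# Each config is named after its language code and exposes a single "train" split.
wit_si = load_dataset("keshan/wit-dataset", "si", split="train")
print(wit_si[0]["page_title"], wit_si[0]["image_url"])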