Update wit-dataset.py
Browse files
Commit message: fixing filepath variable
- wit-dataset.py +3 -4
wit-dataset.py
CHANGED
@@ -1,8 +1,7 @@
|
|
1 |
import datasets
|
2 |
|
3 |
logger = datasets.logging.get_logger(__name__)
|
4 |
-
_DESCRIPTION = """
|
5 |
-
Wikipedia-based Image Text (WIT) Dataset is a large multimodal multilingual dataset.
|
6 |
WIT is composed of a curated set of 37.6 million entity rich image-text examples with 11.5 million unique images across 108 Wikipedia languages.
|
7 |
Its size enables WIT to be used as a pretraining dataset for multimodal machine learning models.
|
8 |
"""
|
@@ -193,8 +192,8 @@ class WIT(datasets.GeneratorBasedBuilder):
|
|
193 |
data_fields = list(self._info().features.keys())
|
194 |
path_idx = data_fields.index("image_url")
|
195 |
# ToDO: Remove after debugging..
|
196 |
-
print(
|
197 |
-
with open(
|
198 |
lines = f.readlines()
|
199 |
headline = line[0]
|
200 |
|
|
|
1 |
import datasets
|
2 |
|
3 |
logger = datasets.logging.get_logger(__name__)
|
4 |
+
_DESCRIPTION = """\\nWikipedia-based Image Text (WIT) Dataset is a large multimodal multilingual dataset.
|
|
|
5 |
WIT is composed of a curated set of 37.6 million entity rich image-text examples with 11.5 million unique images across 108 Wikipedia languages.
|
6 |
Its size enables WIT to be used as a pretraining dataset for multimodal machine learning models.
|
7 |
"""
|
|
|
192 |
data_fields = list(self._info().features.keys())
|
193 |
path_idx = data_fields.index("image_url")
|
194 |
# ToDO: Remove after debugging..
|
195 |
+
print(filepath)
|
196 |
+
with open(filepath, encoding="utf-8") as f:
|
197 |
lines = f.readlines()
|
198 |
headline = line[0]
|
199 |
|