Update files from the datasets library (from 1.6.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.6.0
- wmt_utils.py +5 -11
wmt_utils.py
CHANGED
@@ -16,7 +16,6 @@
 # Lint as: python3
 """WMT: Translate dataset."""
 
-from __future__ import absolute_import, division, print_function
 
 import codecs
 import functools
@@ -28,8 +27,6 @@ import re
 import xml.etree.cElementTree as ElementTree
 from abc import ABC, abstractmethod
 
-import six
-
 import datasets
 
 
@@ -61,7 +58,7 @@ builder = datasets.builder("wmt_translate", config=config)
 CWMT_SUBSET_NAMES = ["casia2015", "casict2011", "casict2015", "datum2015", "datum2017", "neu2017"]
 
 
-class SubDataset(object):
+class SubDataset:
     """Class to keep track of information on a sub-dataset of WMT."""
 
     def __init__(self, name, target, sources, url, path, manual_dl_files=None):
@@ -89,8 +86,8 @@ class SubDataset(object):
           manual_dl_files: `<list>(string)` (optional), the list of files that must
            be manually downloaded to the data directory.
        """
-        self._paths = (path,) if isinstance(path, six.string_types) else path
-        self._urls = (url,) if isinstance(url, six.string_types) else url
+        self._paths = (path,) if isinstance(path, str) else path
+        self._urls = (url,) if isinstance(url, str) else url
        self._manual_dl_files = manual_dl_files if manual_dl_files else []
        self.name = name
        self.target = target
@@ -941,11 +938,8 @@ def _parse_tmx(path):
        return segs[0].text
 
    with open(path, "rb") as f:
-        if six.PY3:
-            # Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
-            utf_f = codecs.getreader("utf-8")(f)
-        else:
-            utf_f = f
+        # Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
+        utf_f = codecs.getreader("utf-8")(f)
        for line_id, (_, elem) in enumerate(ElementTree.iterparse(utf_f)):
            if elem.tag == "tu":
                yield line_id, {_get_tuv_lang(tuv): _get_tuv_seg(tuv) for tuv in elem.iterfind("tuv")}
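The only behavioral nuance in the SubDataset.__init__ hunk is that a lone string is now detected with isinstance(..., str) instead of six.string_types; the (x,) if isinstance(x, str) else x idiom itself is unchanged and just wraps a single string in a tuple so the rest of the class can always iterate over paths and URLs. A minimal sketch of that idiom (the as_tuple helper and the file names below are illustrative, not part of the library):

def as_tuple(path_or_paths):
    """Wrap a single string in a tuple so callers can always iterate."""
    return (path_or_paths,) if isinstance(path_or_paths, str) else path_or_paths

# A single path and a tuple of paths both come out as tuples.
assert as_tuple("train.de-en.tsv") == ("train.de-en.tsv",)
assert as_tuple(("train.de", "train.en")) == ("train.de", "train.en")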
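For context, the _parse_tmx hunk drops the Python 2 branch but keeps the workaround itself: the TMX file is still opened in binary mode and then wrapped in a UTF-8 StreamReader so ElementTree.iterparse receives decoded text (see the TensorFlow issue linked in the comment). A rough, self-contained sketch of the same pattern, assuming an in-memory stand-in for a TMX file and using xml.etree.ElementTree (the cElementTree alias imported by the module was removed in Python 3.9):

import codecs
import io
import xml.etree.ElementTree as ElementTree

# Hypothetical two-language translation unit; real TMX files have the same shape.
raw = io.BytesIO(
    b'<tmx version="1.4"><body>'
    b'<tu><tuv xml:lang="de"><seg>Hallo Welt</seg></tuv>'
    b'<tuv xml:lang="en"><seg>Hello world</seg></tuv></tu>'
    b"</body></tmx>"
)

# Same pattern as the updated _parse_tmx: keep the underlying stream binary,
# decode it through a UTF-8 StreamReader, and let iterparse consume the text.
utf_f = codecs.getreader("utf-8")(raw)
for _, elem in ElementTree.iterparse(utf_f):
    if elem.tag == "tu":
        print([seg.text for seg in elem.iter("seg")])  # ['Hallo Welt', 'Hello world']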