may-ohta committed on
Commit
309d724
1 Parent(s): 55b4aac

Create damt.py

Files changed (1)
  1. damt.py +119 -0
damt.py ADDED
@@ -0,0 +1,119 @@
# coding=utf-8
"""Multi-domain German-English parallel dataset for Domain Adapted Machine Translation."""

import datasets

_CITATION = """\
@inproceedings{koehn-knowles-2017-six,
    title = "Six Challenges for Neural Machine Translation",
    author = "Koehn, Philipp and
      Knowles, Rebecca",
    booktitle = "Proceedings of the First Workshop on Neural Machine Translation",
    month = aug,
    year = "2017",
    address = "Vancouver",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W17-3204",
    doi = "10.18653/v1/W17-3204",
    pages = "28--39",
}
@inproceedings{aharoni2020unsupervised,
    title = "Unsupervised Domain Clusters in Pretrained Language Models",
    author = "Aharoni, Roee and
      Goldberg, Yoav",
    booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    year = "2020",
    url = "https://arxiv.org/abs/2004.02105",
    publisher = "Association for Computational Linguistics",
}
"""

_URL = "https://drive.google.com/file/d/1yvB-pvlojtT2UpOX1JvwtD6rw9joQ49A/view"

_HOMEPAGE = "https://github.com/roeeaharoni/unsupervised-domain-clusters"

_DOMAIN = ["it", "koran", "law", "medical", "subtitles"]


class DAMTConfig(datasets.BuilderConfig):
    """BuilderConfig for the DAMT dataset."""

    def __init__(self, domain=None, **kwargs):
        """
        Args:
            domain: domain name; must be one of _DOMAIN.
            **kwargs: keyword arguments forwarded to super.
        """
        super(DAMTConfig, self).__init__(
            name=domain,
            description="Multi-domain German-English parallel dataset for Domain Adapted Machine Translation.",
            version=datasets.Version("1.0.0", ""),
            **kwargs,
        )

        # Validate the domain name.
        assert domain in _DOMAIN, f"domain must be one of {_DOMAIN}, got {domain!r}"

        self.domain = domain


class DAMT(datasets.GeneratorBasedBuilder):
    """Multi-domain German-English parallel dataset for Domain Adapted Machine Translation."""

    # One config per domain; the config name doubles as the domain name.
    BUILDER_CONFIGS = [DAMTConfig(domain=d) for d in _DOMAIN]

    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description="Multi-domain German-English parallel dataset for Domain Adapted Machine Translation.",
            # Each example holds a single en-de sentence pair.
            features=datasets.Features(
                {"translation": datasets.features.Translation(languages=("en", "de"))}
            ),
            # If there were a canonical (input, target) tuple, it would be set
            # here for as_supervised=True in builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation.
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""

        domain = self.config.domain

        def _get_drive_url(url):
            # Turn a Google Drive share link (".../file/d/<id>/view") into a
            # direct-download link; the file id is the sixth path segment.
            return f"https://drive.google.com/uc?id={url.split('/')[5]}"

        dl_dir = dl_manager.download_and_extract(_get_drive_url(_URL))

        # Each domain directory holds line-aligned train/dev/test files with
        # matching .en and .de sides.
        files = {
            "train": {
                "en_file": f"{dl_dir}/{domain}/train.en",
                "de_file": f"{dl_dir}/{domain}/train.de",
            },
            "validation": {
                "en_file": f"{dl_dir}/{domain}/dev.en",
                "de_file": f"{dl_dir}/{domain}/dev.de",
            },
            "test": {
                "en_file": f"{dl_dir}/{domain}/test.en",
                "de_file": f"{dl_dir}/{domain}/test.de",
            },
        }

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=files["train"]),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs=files["validation"]),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs=files["test"]),
        ]

    def _generate_examples(self, en_file, de_file):
        """Yields examples."""

        # The .en and .de files are line-aligned, so zipping them produces
        # parallel sentence pairs; ids are running integers.
        with open(en_file, "r", encoding="utf-8") as en_f:
            with open(de_file, "r", encoding="utf-8") as de_f:
                for id_, (en, de) in enumerate(zip(en_f, de_f)):
                    yield id_, {"translation": {"en": en.strip(), "de": de.strip()}}
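
A minimal usage sketch, assuming the script above is saved locally as damt.py and a release of the datasets library that still supports dataset loading scripts (newer releases may require trust_remote_code=True, or may have dropped script loading entirely):

    from datasets import load_dataset

    # The config name selects the domain, since DAMTConfig passes
    # name=domain to BuilderConfig.
    dataset = load_dataset("damt.py", "medical")  # it, koran, law, medical, subtitles
    print(dataset["train"][0]["translation"])     # {"en": "...", "de": "..."}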