sudarshan85 committed
Commit 1ad7203
1 parent: acb598f

TEDHRLR done. Not working

Files changed (1)
  1. rosetta_balcanica.py +63 -75
rosetta_balcanica.py CHANGED
@@ -17,13 +17,14 @@
 #!/usr/bin/env python

 import datasets, logging
+from itertools import permutations

 logging.basicConfig(format='[%(name)s] %(levelname)s -> %(message)s')
 logger = logging.getLogger(__name__)
 logger.setLevel(logging.DEBUG)

-_SUPPORTED_WB_LANGS = ['ma', 'sh']
-_VALID_LANGUAGE_PAIRS = [('en', wb_lang) for wb_lang in _SUPPORTED_WB_LANGS]
+_SUPPORTED_WB_LANGS = ['en', 'ma', 'sh']
+_VALID_LANGUAGE_PAIRS = [lang_pair for lang_pair in permutations(_SUPPORTED_WB_LANGS, 2) if lang_pair[0] == 'en' or lang_pair[1] == 'en']

 # TODO: Add BibTeX citation
 # Find for instance the citation on arxiv or on the dataset repo/website
@@ -38,104 +39,91 @@ _CITATION="""\
 _DESCRIPTION="""
 Rosetta-Balcanica is a set of evaluation datasets for low resource western Balkan languages manually sourced from articles from OSCE website.
 """
+
 _HOMEPAGE='https://github.com/ebegoli/rosetta-balcanica'
-URL='https://github.com/ebegoli/rosetta-balcanica/raw/main/rosetta_balcanica.tar.gz'
+_DATA_URL='https://github.com/ebegoli/rosetta-balcanica/raw/main/rosetta_balcanica.tar.gz'
 _VERSION=datasets.Version('1.0.0')

 class RosettaBalcanicaConfig(datasets.BuilderConfig):
-  """BuilderConfig for Rosetta Balcanica
+  """BuilderConfig for Rosetta Balcanica for low resource West Balcan languages
   """

-  def __init__(self, wb_lang, **kwargs):
-    logger.debug("i'm in config")
+  def __init__(self, lang_pair=(None, None), **kwargs):
+    assert lang_pair in _VALID_LANGUAGE_PAIRS, (f"Language pair {lang_pair} not supported (yet)")
+    name = f'{lang_pair[0]} to {lang_pair[1]}'
+    desc = f'Translation dataset from {lang_pair[0]} to {lang_pair[1]}'
     super(RosettaBalcanicaConfig, self).__init__(
-      name=f'en-{wb_lang}',
-      description=f'Translation dataset from en to {wb_lang}',
+      name=name,
+      description=desc,
       version=_VERSION,
       **kwargs
     )

-    # validate language
-    assert wb_lang in supported_wb, (f"Supported West Balkan languages are {supported_wb}, got {wb_lang}")
-    self.wb_lang = wb_lang
+    self.lang_pair = lang_pair

 class RoesettaBalcancia(datasets.GeneratorBasedBuilder):
   logger.debug("i'm in builder")
   BUILDER_CONFIGS = [
     RosettaBalcanicaConfig(
-      wb_lang=wb_lang,
+      lang_pair=lang_pair,
+      versino=_VERSION,
     )
-    for wb_lang in supported_wb
+    for lang_pair in _VALID_LANGUAGE_PAIRS
   ]

   def _info(self):
-    source,target = 'en', self.config.wb_lang
-    features = datasets.Features(
-      {
-        'id': datasets.Value('string'),
-        'translation': datasets.features.Translation(languages=(source, target))
-      }
-    )
-
     return datasets.DatasetInfo(
       description=_DESCRIPTION,
-      features=features,
-      supervised_keys=None,
+      features=datasets.Features(
+        {'translation': datasets.features.Translation(languages=self.config.lang_pair)}
+      ),
       homepage=_HOMEPAGE,
+      supervised_keys=self.config.lang_pair,
       citation=_CITATION,
     )

   def _split_generators(self, dl_manager):
-    logger.debug("_split_generators")
-    wb_lang = self.config.wb_lang
-    # lang_pair = f'en-{wb_lang}'
-
-    # data_dir = f'~/rosetta_balcanica/{lang_pair}'
-    # files = {}
-    # for split in ('train', 'test'):
-    #   files[split] = {
-    #     'en': f'{data_dir}/{split}_en.txt',
-    #     f'{wb_lang}': f'{data_dir}/{split}_{wb_lang}.txt'
-    #   }
-
-    # return [
-    #   datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=files['train']),
-    #   datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs=files['test']),
-    # ]
-
-    # train_dls = {
-    #   'en': f'{data_dir}/{lang_pair}/train_en.txt',
-    #   f'{wb_lang}': f'{data_dir}/{lang_pair}/train_{wb_lang}.txt',
-    # }
-    # test_dls = {
-    #   'en': f'{data_dir}/{lang_pair}/test_en.txt',
-    #   f'{wb_lang}': f'{data_dir}/{lang_pair}/test_{wb_lang}.txt',
-    # }
-
-    train_dls = dl_manager.download_and_extract(_URLs[lang_pair]['train'])
-    test_dls = dl_manager.download_and_extract(_URLs[lang_pair]['test'])
-
-
-    return [
-      datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepath': train_dls}),
-      datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={'filepath': test_dls}),
-    ]
-
-  def _generate_examples(self, en_path, wb_path):
-    wb_lang = self.config.wb_lang
-    with open(en_path, encoding='utf-8') as f1, open(wb_path, encoding='utf-8') as f2:
-      for sent_counter, (en_sent, wb_sent) in enumerate(zip(f1, f2)):
-        en_sent = en_sent.strip()
-        wb_sent = wb_sent.strip()
-        result = (
-          sent_counter,
-          {
-            'id': str(sent_counter),
-            'translation': {
-              'en': en_sent,
-              f'{wb_lang}': wb_sent
-            }
-          }
-        )
-        yield result
+    archive = dl_manager.download(_DATA_URL)
+    source,target = self.config.lang_pair
+    non_en = source if target == 'en' else target
+    data_dir = f'en-{non_en}'
+
+    return [
+      datasets.SplitGenerator(
+        name=datasets.Split.TRAIN,
+        gen_kwargs={
+          'source_file': f'{data_dir}/train_{source}.txt',
+          'target_file': f'{data_dir}/train_{target}.txt',
+          'files': dl_manager.iter_archive(archive)
+        }
+      ),
+      datasets.SplitGenerator(
+        name=datasets.Split.TEST,
+        gen_kwargs={
+          'source_file': f'{data_dir}/test_{source}.txt',
+          'target_file': f'{data_dir}/test_{target}.txt',
+          'files': dl_manager.iter_archive(archive)
+        }
+      ),
+    ]
+
+  def _generate_examples(self, source_file, target_file, files):
+    source_sents, target_sents = None, None
+    for path, f in files:
+      if path == source_file:
+        source_sents = f.read().decode('utf-8').split('\n')
+      elif path == target_file:
+        target_sents = f.read().decode('utf-8').split('\n')
+      if source_sents is not None and target_sents is not None:
+        break
+
+    assert len(target_sents) == len(source_sents), (f"Sizes do not match: {len(source_sents) vs len(target_sents)} for {source_file} vs {target_file}")
+
+    source,target = self.config.lang_pair
+    for idx, (l1, l2) in enumerate(zip(source_sents, target_sents)):
+      result = {
+        'translation': {source: l1, target: l2}
+      }
+      if all(result.values()):
+        yield idx, result

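As the commit message says, the script is not working at this point: the added keyword versino=_VERSION in BUILDER_CONFIGS would be rejected by datasets.BuilderConfig.__init__(), and the assert message in _generate_examples is not valid f-string syntax as shown. Assuming those two lines are corrected (version=_VERSION, and braces around each len(...) call), the following is an editorial sketch, not part of the commit, of how the new language-pair configs would be exercised; the local script path is an assumption:

import datasets

# Config names follow the new naming scheme f'{source} to {target}',
# i.e. 'en to ma', 'ma to en', 'en to sh', 'sh to en'.
dset = datasets.load_dataset('./rosetta_balcanica.py', 'en to sh')

# Each example carries only a 'translation' dict keyed by the two language codes.
print(dset['train'][0]['translation'])  # e.g. {'en': '...', 'sh': '...'}

Note that dl_manager.iter_archive streams the tarball without extracting it, which is why _split_generators passes the archive iterator alongside the expected member paths for each split.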