versae committed on
Commit cee8760
1 Parent(s): 7cf41ef

Latest version of translator

Files changed (1)
  1. translator.py +24 -8
translator.py CHANGED
@@ -46,7 +46,7 @@ def split_into_chunks(text, tokenizer, max_tokens=128):
     return chunks


-def to_lang_code(texts, lang_code, model, tokenizer, max_tokens=128):
+def to_lang_code(texts, lang_code, model, tokenizer, max_tokens=128, sentence_joiner=" "):
     is_string = isinstance(texts, str)
     if is_string:
         texts = [texts]
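
Note: the new sentence_joiner parameter controls how the translated chunks of a long input are glued back together (previously hard-coded to a single space; see the hunk at line 78 below). A minimal sketch of calling the updated signature follows; the FLORES-200 language codes and the tokenizer.lang_code_to_id lookup suggest an NLLB-family model, but the checkpoint name here is an assumption, not taken from this repo:

    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

    # Assumed checkpoint; the actual model/tokenizer are passed in by main().
    tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
    model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")

    # Inputs longer than max_tokens are split into chunks, translated one chunk
    # at a time, and re-joined with sentence_joiner.
    nob = to_lang_code("First sentence. Second sentence.", "nob_Latn",
                       model, tokenizer, max_tokens=128, sentence_joiner="\n")
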
@@ -66,7 +66,7 @@ def to_lang_code(texts, lang_code, model, tokenizer, max_tokens=128):
     translated_tokens = model.generate(
         **inputs,
         forced_bos_token_id=tokenizer.lang_code_to_id[lang_code],
-        max_new_tokens=512,
+        max_length=512, # max_new_tokens=512,
         # max_length=int(len(inputs.tokens()) * 1.25) # 25% more tokens for the translation just in case
     )
     translated_text = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
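
Note: in transformers' generate(), max_new_tokens caps only the number of freshly generated tokens, while max_length caps the total length of the decoder sequence, including the start and forced language tokens, so it allows slightly fewer generated tokens than max_new_tokens=512 would. The commented-out alternative would instead scale the budget to the input length. A sketch of the distinction:

    # max_new_tokens=512 -> up to 512 generated tokens
    # max_length=512     -> decoder sequence (incl. forced BOS) capped at 512
    translated_tokens = model.generate(
        **inputs,
        forced_bos_token_id=tokenizer.lang_code_to_id[lang_code],
        max_length=512,
    )
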
@@ -75,7 +75,7 @@ def to_lang_code(texts, lang_code, model, tokenizer, max_tokens=128):
     outputs = []
     start = 0
     for length in lengths:
-        outputs.append(" ".join(translated_texts[start:start + length]))
+        outputs.append(sentence_joiner.join(translated_texts[start:start + length]))
         start += length
     return outputs[0] if is_string else outputs

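Note: lengths (built earlier in the function, outside this hunk) records how many chunks each original input produced, so consecutive slices of the decoded chunk translations are re-joined per input. A self-contained worked example of the regrouping loop:

    lengths = [2, 1]  # first text was split into 2 chunks, second into 1
    translated_texts = ["Part one.", "Part two.", "Single text."]
    sentence_joiner = " "

    outputs, start = [], 0
    for length in lengths:
        outputs.append(sentence_joiner.join(translated_texts[start:start + length]))
        start += length
    assert outputs == ["Part one. Part two.", "Single text."]
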
@@ -90,6 +90,8 @@ def main(
     dataset_revision: Optional[str]=None,
     source_lang: Optional[str]="eng_Latn",
     target_langs: Optional[Union[list, tuple]]=("nob_Latn", "nno_Latn"),
+    sentence_joiner: Optional[str]=" ",
+    max_tokens_per_chunk: Optional[int]=128,
     batch_size: Optional[int]=24,
     output_dir: Optional[Path]=Path("./"),
 ) -> None:
@@ -101,9 +103,21 @@ def main(
     )

     for lang_code in target_langs:
+        lang_code_short = re.split(r"[-_ /]", lang_code)[0]
+        if dataset_config:
+            output_path = output_dir / dataset_config / lang_code_short
+        else:
+            output_path = output_dir / lang_code_short
         for split in dataset_splits:
             ds = load_dataset(dataset_name, name=dataset_config, revision=dataset_revision, split=split)
-            translate = partial(to_lang_code, lang_code=lang_code, model=model, tokenizer=tokenizer)
+            translate = partial(
+                to_lang_code,
+                lang_code=lang_code,
+                model=model,
+                tokenizer=tokenizer,
+                sentence_joiner=sentence_joiner,
+                max_tokens=max_tokens_per_chunk,
+            )
             ds = ds.map(
                 lambda batch: {
                     column: translate(batch[column])
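
Note: lang_code_short keeps only the language part of a FLORES-200-style code, and is now computed once per language before the split loop; the output path additionally nests under dataset_config when one is set. For example:

    import re
    from pathlib import Path

    re.split(r"[-_ /]", "nob_Latn")[0]  # -> "nob"
    # With a (hypothetical) dataset_config "books" and output_dir Path("./"):
    #   output_path == Path("books/nob"); with no config: Path("nob")
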
@@ -113,12 +127,10 @@ def main(
                 batch_size=batch_size,
                 desc=f"Translating to {lang_code} ({split})",
             )
-            lang_code_short = re.split(r"[-_ /]", lang_code)[0]
-            ds.save_to_disk(output_dir / lang_code_short / split, max_shard_size="1GB")
-
+            ds.save_to_disk(output_path / split, max_shard_size="1GB")
             json_filename = f"{lang_code_short}_{split}.json.gz".lower()
             ds.to_pandas().to_json(
-                output_dir / lang_code_short / json_filename, orient='records', lines=True
+                output_path / json_filename, orient='records', lines=True
             )


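Note: each split is written twice under the shared output_path: as an Arrow dataset sharded at 1 GB, and as gzipped JSON lines (pandas infers gzip compression from the .json.gz suffix by default). A sketch of reading both back, with paths assuming lang_code_short "nob", no dataset_config, and a "train" split:

    import pandas as pd
    from datasets import load_from_disk

    ds = load_from_disk("nob/train")  # Arrow copy
    df = pd.read_json("nob/nob_train.json.gz", orient="records", lines=True)  # JSON copy
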
@@ -134,6 +146,8 @@ if __name__ == "__main__":
     parser.add_argument('--model_revision')
     parser.add_argument('--source_lang', default="eng_Latn")
     parser.add_argument('--target_langs', default="nob_Latn,nno_Latn", help="Comma separated target languages to translate to")
+    parser.add_argument('--sentence_joiner', default=" ", help="String to join sentences split for translation")
+    parser.add_argument('--max_tokens_per_chunk', default=128, type=int, help="Max number of tokens for each chunk for translation")
     parser.add_argument('--batch_size', '-bs', default=24, type=int, help='Number of inputs per batch for prediction')
     parser.add_argument('--output_dir', '-o', default="./", type=str)
     args = parser.parse_args()
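
Note: a typical invocation with the two new flags might look like the following; all values are illustrative, and the dataset/model flags defined above this hunk are elided:

    python translator.py \
        --target_langs nob_Latn,nno_Latn \
        --sentence_joiner " " \
        --max_tokens_per_chunk 128 \
        --batch_size 24 \
        --output_dir ./translations
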
@@ -147,6 +161,8 @@ if __name__ == "__main__":
         model_revision=args.model_revision,
         source_lang=args.source_lang,
         target_langs=args.target_langs.split(","),
+        sentence_joiner=args.sentence_joiner,
+        max_tokens_per_chunk=args.max_tokens_per_chunk,
         batch_size=args.batch_size,
         output_dir=Path(args.output_dir),
     )
 