|
from functools import partial

import torch
from datasets import load_dataset
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_name = "facebook/nllb-200-3.3B"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the model in full precision and move it onto the GPU when one is available.
model = AutoModelForSeq2SeqLM.from_pretrained(
    model_name, use_auth_token=True, torch_dtype=torch.float32
)
model.to(device)

# src_lang marks every input as English; the target language is selected later,
# at generation time, through forced_bos_token_id.
tokenizer = AutoTokenizer.from_pretrained(
    model_name, use_auth_token=True, src_lang="eng_Latn"
)
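
# Note: the NLLB tokenizer prepends the source-language code and appends </s>,
# so an English input is encoded roughly as ["eng_Latn", ..., "</s>"].
# Illustrative check (exact subword splits may vary by tokenizer version):
#   >>> tokenizer("Hello world").tokens()
#   ['eng_Latn', '▁Hello', '▁world', '</s>']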


def to_lang_code(text, lang_code):
    """Translate a single English string into the target NLLB language code."""
    inputs = tokenizer(text, return_tensors="pt").to(device)
    input_length = inputs["input_ids"].shape[1]
    with torch.inference_mode():
        translated_tokens = model.generate(
            **inputs,
            # Force the first decoder token to be the target language code.
            forced_bos_token_id=tokenizer.convert_tokens_to_ids(lang_code),
            # Allow translations up to 50% longer than the source.
            max_length=int(input_length * 1.5),
        )
    return tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0]
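
# Example usage (illustrative; the exact wording can vary between model
# revisions and generation settings):
#   >>> to_lang_code("How are you?", "nob_Latn")
#   'Hvordan har du det?'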


if __name__ == "__main__":
    # NLLB-200 codes for Norwegian Bokmål ("nb") and Norwegian Nynorsk ("nn").
    languages = (("nb", "nob_Latn"), ("nn", "nno_Latn"))
    ds = load_dataset("paws-x", "en")
    dss = {}
    for lang, translate_code in languages:
        translate = partial(to_lang_code, lang_code=translate_code)
        # Translate both sentences of every PAWS-X example, one example at a time.
        dss[lang] = ds.map(lambda example: {
            "sentence1": translate(example["sentence1"]),
            "sentence2": translate(example["sentence2"]),
        }, desc=f"Translating to {lang}")
        # Write each split as JSON Lines: one record per line.
        for split in ("test", "validation", "train"):
            json_lines = dss[lang][split].to_pandas().to_json(orient="records", lines=True)
            with open(f"{lang}_{split}.json", "w") as json_file:
                json_file.write(json_lines)
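
# The files written above are JSON Lines despite the ".json" suffix. They can be
# loaded back with the datasets JSON builder, e.g. (illustrative):
#   >>> from datasets import load_dataset
#   >>> load_dataset("json", data_files={"test": "nb_test.json"})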