nllb-spa-awa-v3 / README.md
hectordiazgomez's picture
Update README.md
cb0edda verified
metadata
language:
  - spa
  - agr
tags:
  - translation
  - nllb
  - aguaruna
  - spanish
license: apache-2.0
metrics:
  - bleu: 40.38

NLLB-200-600M Fine-tuned for Aguaruna-Spanish Translation

This is an NLLB-200-600M model fine-tuned for translation between Aguaruna and Spanish.

How to use the model:

!pip install sentencepiece transformers==4.33
import torch
from transformers import NllbTokenizer, AutoModelForSeq2SeqLM

def fix_tokenizer(tokenizer, new_lang='agr_Latn'):
    """Register `new_lang` as an additional language code on an NllbTokenizer.

    Mutates the tokenizer in place: gives `new_lang` its own token id,
    repositions the `<mask>` token after all language codes, and rebuilds the
    fairseq id maps so encoding/decoding stay consistent.

    NOTE(review): this pokes transformers-internal attributes
    (`fairseq_tokens_to_ids`, `_additional_special_tokens`) and matches the
    `transformers==4.33` pin above — newer versions changed these internals,
    so confirm before upgrading.
    """
    # If `new_lang` was already added, don't count its slot twice.
    old_len = len(tokenizer) - int(new_lang in tokenizer.added_tokens_encoder)
    tokenizer.lang_code_to_id[new_lang] = old_len-1
    tokenizer.id_to_lang_code[old_len-1] = new_lang
    # <mask> must keep the last id: sp_model vocab + all lang codes + offset.
    tokenizer.fairseq_tokens_to_ids["<mask>"] = len(tokenizer.sp_model) + len(tokenizer.lang_code_to_id) + tokenizer.fairseq_offset

    tokenizer.fairseq_tokens_to_ids.update(tokenizer.lang_code_to_id)
    tokenizer.fairseq_ids_to_tokens = {v: k for k, v in tokenizer.fairseq_tokens_to_ids.items()}
    if new_lang not in tokenizer._additional_special_tokens:
        tokenizer._additional_special_tokens.append(new_lang)
    # Drop the added-token caches so lookups go through the rebuilt maps.
    tokenizer.added_tokens_encoder = {}
    tokenizer.added_tokens_decoder = {}

# Hugging Face Hub repo id of the fine-tuned checkpoint.
MODEL_URL = "hectordiazgomez/nllb-spa-awa-v3"
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_URL)
tokenizer = NllbTokenizer.from_pretrained(MODEL_URL)
# Patch the tokenizer so the custom 'agr_Latn' language code is usable.
fix_tokenizer(tokenizer)

def translate(
    text,
    model,
    tokenizer,
    src_lang='agr_Latn',
    tgt_lang='spa_Latn',
    max_length='auto',
    num_beams=4,
    n_out=None,
    **kwargs
):
    """Translate `text` with the fine-tuned NLLB model.

    Args:
        text: a string (or list of strings) to translate.
        model: the loaded seq2seq model (AutoModelForSeq2SeqLM).
        tokenizer: an NllbTokenizer already patched by `fix_tokenizer`.
        src_lang: NLLB code of the source language.
        tgt_lang: NLLB code of the target language.
        max_length: generation budget; 'auto' scales with the input length.
        num_beams: beam-search width for `model.generate`.
        n_out: number of candidate translations per input (None -> 1).
        **kwargs: extra keyword arguments forwarded to `model.generate`.

    Returns:
        A single string when `text` is a str and `n_out` is None,
        otherwise the list of decoded translations.
    """
    tokenizer.src_lang = src_lang
    encoded = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
    if max_length == 'auto':
        # Give the decoder headroom proportional to the source length.
        max_length = int(32 + 2.0 * encoded.input_ids.shape[1])
    model.eval()
    generated_tokens = model.generate(
        **encoded.to(model.device),
        forced_bos_token_id=tokenizer.lang_code_to_id[tgt_lang],
        max_length=max_length,
        num_beams=num_beams,
        num_return_sequences=n_out or 1,
        **kwargs
    )
    out = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
    if isinstance(text, str) and n_out is None:
        return out[0]
    # BUG FIX: the original bare `return` yielded None for list inputs or
    # n_out > 1; return the full list of decoded translations instead.
    return out

# Example: Aguaruna -> Spanish using the default src/tgt codes of `translate`.
translate("Uchi piipichi buuke baejai.", model=model, tokenizer=tokenizer)
# El niño se quedo con el pelo.