"""
https://huggingface.co/docs/transformers/tasks/translation
"""
from PyCmpltrtok.common import sep
import sys
import os

sep('Tokenizer')
from transformers import AutoTokenizer

# Hub id would be "google-t5/t5-small"; we point at the already-downloaded
# local snapshot (pinned revision) to avoid touching the network.
checkpoint = os.path.join(
    os.environ['HOME'],
    ".cache/huggingface/hub/models--google-t5--t5-small/snapshots/df1b051c49625cf57a3d0d8d3863ed4d13564fe4",
)
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
print(tokenizer)

# Translation task setup — T5 is steered by a natural-language task prefix.
source_lang = "en"
target_lang = "fr"
prefix = "translate English to French: "


from transformers import DataCollatorForSeq2Seq
from PyCmpltrtok.common import ld2dl, dl2ld

data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=checkpoint)
print(data_collator)


sep('Model')
from transformers import AutoModelForSeq2SeqLM, Seq2SeqTrainingArguments, Seq2SeqTrainer

# Load the seq2seq LM weights from the same local snapshot as the tokenizer.
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
print(model)

sep('My Infer')
import torch
# Fix: the original hard-coded torch.device('cuda:0'), which makes
# model.to(dev) raise on CPU-only machines. Fall back to CPU when CUDA is
# unavailable; behavior on GPU hosts is unchanged.
dev = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = model.to(dev)

# Demo batch. NOTE(review): the explicit '</s>' on the second sentence is
# also appended by the tokenizer as EOS, so that row ends with a double EOS —
# presumably a deliberate experiment; confirm.
sentences = [
    prefix + 'How are you?',
    prefix + 'Nice to meet you! How do you do?</s>',
]
print(sentences)

# Tokenize, then seed every decoder sequence with id 0 (presumably the pad
# token acting as T5's decoder start token — verify against the config).
encoded = tokenizer(sentences)
encoded['decoder_input_ids'] = [[0, ], [0, ]]
collated = data_collator(dl2ld(encoded))
# Move every collated tensor onto the inference device.
xxinputs_collated_gpu = {key: value.to(dev) for key, value in collated.items()}
sep('input_ids decoded')
print(tokenizer.batch_decode(xxinputs_collated_gpu['input_ids']))


sep('My Infer')
# Manual greedy decoding for up to N tokens, using the KV cache.
N = 16
past_key_values = None
xxids = xxinputs_collated_gpu['decoder_input_ids']  # accumulated decoder ids
# The encoder-side inputs never change between decode steps, so build the
# dict ONCE (the original rebuilt it inside the loop every iteration, and
# also had a dead `xxin = xxinputs_collated_gpu` assignment that was
# immediately overwritten).
encoder_side = {k: v for k, v in xxinputs_collated_gpu.items() if k != 'decoder_input_ids'}
# One growing plain-text transcript per batch row (was hard-coded to 2 rows).
xxsents = ['' for _ in range(xxids.shape[0])]

# Fix: when past_key_values is populated, only the NEWLY generated token may
# be passed as decoder_input_ids — the cache already holds the earlier
# positions. The original fed the full accumulated `xxids` together with the
# cache, which duplicates entries in the cache and corrupts positions.
step_input = xxids  # first step: cache is empty, feed the whole prefix
for i in range(N):
    xxout = model(**encoder_side, past_key_values=past_key_values, decoder_input_ids=step_input)
    past_key_values = xxout['past_key_values']  # kv cache
    xxlogits = xxout['logits'][:, -1:, :]       # keep only the last position
    the_next_ids = xxlogits.argmax(dim=-1)      # greedy pick

    xxids = torch.cat([xxids, the_next_ids], dim=-1)  # cat
    step_input = the_next_ids  # subsequent steps: feed only the new token

    xxwords = tokenizer.batch_decode(the_next_ids)
    print(i, xxwords)
    for ii in range(len(xxsents)):
        xxsents[ii] += " " + xxwords[ii]

sep('Results')
print(xxsents)

sep('Decoded')
print(tokenizer.batch_decode(xxids))
print(tokenizer.batch_decode(xxids, skip_special_tokens=True))