import os
import torch
from transformers import T5Tokenizer, T5ForConditionalGeneration

# Interactive inference with a fine-tuned T5 model: read one line from stdin,
# beam-search 3 candidate outputs, and print them.

# Restrict visibility to GPU 1. Torch initializes CUDA lazily, so setting this
# before the first torch.cuda call is sufficient.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'

# Hard-coded artifact locations for the fine-tuned model and its
# SentencePiece vocabulary.
MODEL_DIR = 'output/models/finetuned-model/Pytorch-Model'
TOKENIZER_PATH = 'output/models/finetuned-model/Pytorch-Model/dl4se_vocab.model'


def _load_model(model_dir, tokenizer_path, device):
    """Load tokenizer and model, move the model to *device* in eval mode.

    Returns a (tokenizer, model) pair.
    """
    tokenizer = T5Tokenizer.from_pretrained(tokenizer_path)
    model = T5ForConditionalGeneration.from_pretrained(model_dir)
    model.to(device)
    model.eval()
    return tokenizer, model


def _generate_candidates(model, tokenizer, text, device, num_candidates=3):
    """Beam-search *num_candidates* outputs for *text*; return decoded strings."""
    with torch.no_grad():
        encoded = tokenizer(text, truncation=True, padding=True, return_tensors='pt')
        source_ids = encoded['input_ids'].to(device, dtype=torch.long)
        source_mask = encoded['attention_mask'].to(device, dtype=torch.long)

        generated_ids = model.generate(
            input_ids=source_ids,
            attention_mask=source_mask,
            max_length=512,
            num_beams=10,
            repetition_penalty=1.0,  # generate() expects floats; 1.0 == no penalty
            length_penalty=1.0,
            early_stopping=True,
            no_repeat_ngram_size=2,
            num_return_sequences=num_candidates,
        )
    return [tokenizer.decode(ids, skip_special_tokens=True) for ids in generated_ids]


def main():
    """Prompt for one input line, run generation, and print all candidates."""
    print("input:")
    input_text = input()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    tokenizer, model = _load_model(MODEL_DIR, TOKENIZER_PATH, device)

    torch.manual_seed(0)  # keep generation deterministic across runs

    candidates = _generate_candidates(model, tokenizer, input_text, device)
    print("Output:\n" + 100 * '-')
    for i, pred in enumerate(candidates):
        print("{}: {}".format(i, pred))
    print(100 * '-')


if __name__ == "__main__":
    main()
