import gradio as gr
import torch
from transformers import T5ForConditionalGeneration, T5Tokenizer

title = "Yoda Translation"
description = "This is my attempt at building a Yoda Translator using T5. It was fed about 20 examples and trained for 40 epochs."
article = "⚡ Blog post 👉 Deploying GPT-J Models on a Telegram Bot with Hugging Face Hub - For Free"
" examples = [ ['English To Yoda', "I am sick of you."], ['Yoda To English','A challenge lifelong it is, not to bend fear into anger'], ['Yoda To English','Luminous beings are we…not this crude matter'] ] device = "cuda" if torch.cuda.is_available() else "cpu" print(device) tokenizer = T5Tokenizer.from_pretrained('stephenleejm/T5_yoda_translator') t5_model = T5ForConditionalGeneration.from_pretrained('stephenleejm/T5_yoda_translator') t5_model.to(device) def test_text(s, task): test_sent = task + ' ' + s # +' ' test_tokenized = tokenizer.encode_plus(test_sent, return_tensors="pt").to(device) test_input_ids = test_tokenized["input_ids"] test_attention_mask = test_tokenized["attention_mask"] t5_model.eval() beam_outputs = t5_model.generate( input_ids=test_input_ids, attention_mask=test_attention_mask, max_length=96, early_stopping=True, num_beams=10, num_return_sequences=3, no_repeat_ngram_size=2 ) answer = "" for beam_output in beam_outputs: sent = tokenizer.decode(beam_output, skip_special_tokens=True, clean_up_tokenization_spaces=True) print(sent) answer += sent + "\n" return answer def run(dropdown, text): dropdown = "e_to_y:" if dropdown == "English To Yoda" else "y_to_e:" print(dropdown) return test_text(text, dropdown) gr.Interface(fn=run, inputs=[ gr.inputs.Dropdown(["Yoda To English", "English To Yoda"], label="Task"), gr.inputs.Textbox(lines=5, label="Input Text")], outputs="text", title=title, description=description, examples=examples).launch()