# Gradio builds the web UI; transformers provides the T5 tokenizer and model used for translation.
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the pre-trained T5-small tokenizer and sequence-to-sequence model.
# The tokenizer converts input text into token IDs the model can consume;
# the model generates the translated token sequence.
tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
def translate_text(text):
    # T5 expects a task prefix; "translate English to French: " sets the translation direction.
    # return_tensors="pt" returns a PyTorch tensor.
    inputs = tokenizer.encode("translate English to French: " + text, return_tensors="pt")
    # Generate the translation with beam search (4 beams), capping the output at 128 tokens
    # and stopping early once every beam has emitted an end-of-sequence token.
    outputs = model.generate(inputs, max_length=128, num_beams=4, early_stopping=True)
    # Decode the generated token IDs back into a string, dropping padding/end-of-sequence tokens.
    translated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return translated_text
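# Illustrative direct call, kept commented out so it does not run on app start
# (the example sentence is arbitrary; the exact French output depends on the model):
#     print(translate_text("How are you today?"))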
# Shared Textbox components: output_1 holds the speech-to-text transcription,
# output_2 holds the French translation.
output_1 = gr.Textbox(label="Speech to Text")
output_2 = gr.Textbox(label="Speech Translation")
# Interface that loads the hosted facebook/wav2vec2-base-960h model for speech recognition.
# Input comes from the user's microphone; the transcription is written to output_1.
generator = gr.Interface.load("huggingface/facebook/wav2vec2-base-960h",
                              inputs="microphone",
                              outputs=output_1,
                              title="Speech-to-text",
                              )
# Interface that wraps translate_text to turn the English transcription into French text.
# Its input is output_1, the speech-to-text transcription produced by the first interface.
translator = gr.Interface(fn=translate_text,
                          inputs=output_1,
                          outputs=output_2,
                          title="English to French Translator",
                          description="Translate English speech to French text using the T5-small model.",
                          )
# Chain the two interfaces so the transcription feeds the translator, then launch the app.
gr.Series(generator, translator).launch(debug=True)
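# Note: gr.Interface.load and gr.Series come from older Gradio releases. A rough, untested
# sketch of the same two-step pipeline for newer Gradio versions might use a local ASR
# pipeline plus gr.Blocks instead (component and parameter names below are assumptions and
# may need adjusting to the installed Gradio/transformers versions):
#
#     from transformers import pipeline
#     asr = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")
#
#     def transcribe(audio_path):
#         # The ASR pipeline accepts a path to an audio file and returns {"text": ...}.
#         return asr(audio_path)["text"]
#
#     with gr.Blocks() as demo:
#         mic = gr.Audio(sources=["microphone"], type="filepath")
#         text_box = gr.Textbox(label="Speech to Text")
#         fr_box = gr.Textbox(label="Speech Translation")
#         btn = gr.Button("Transcribe and translate")
#         # Run transcription first, then feed the transcription into the translator.
#         btn.click(transcribe, inputs=mic, outputs=text_box).then(
#             translate_text, inputs=text_box, outputs=fr_box
#         )
#     # demo.launch()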