import gradio as gr
from transformers import pipeline
import torch
import numpy as np
device = "cuda:0" if torch.cuda.is_available() else "cpu" | |
device = 'cpu' | |
print("Device:", device) | |

# Translation pipeline: French -> English
pipe_translate = pipeline("translation", model="Helsinki-NLP/opus-mt-fr-en", device=device)
# Text-to-speech pipeline; better quality and way faster than bark
pipe_tts = pipeline("text-to-speech", model="facebook/mms-tts-eng", device=device)
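# Note (illustrative, inferred from how the outputs are used below):
#   pipe_translate(text) returns a list like [{"translation_text": "..."}]
#   pipe_tts(text) returns a dict with "audio" (float NumPy array) and "sampling_rate"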

def get_translation(text):
    # Return the translated string from the first pipeline result
    return pipe_translate(text)[0]["translation_text"]

def get_audio(text):
    speech = pipe_tts(text)
    # Scale float audio in [-1, 1] to 16-bit PCM and transpose for gr.Audio
    return speech["sampling_rate"], (speech["audio"] * 32767).astype(np.int16).T
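
# Illustrative usage of the two helpers (the example sentence is an assumption):
#   english = get_translation("Bonjour, comment allez-vous ?")
#   rate, pcm = get_audio(english)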

with gr.Blocks() as demo:
    input_text = gr.Textbox(
        label="Input text",
        info="Your text",
        lines=3,
        placeholder="Write the text to translate",
    )
    translation_button = gr.Button("Translate...")
    output_text = gr.Textbox(
        label="Output text",
        info="Your text",
        lines=3,
        placeholder="Your translation",
    )
    speech_button = gr.Button("Generate audio...")
    # Audio output for the generated speech
    output_audio = gr.Audio(label="Output")
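
    # Wire the buttons to the handler functions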
    translation_button.click(
        get_translation,
        inputs=[input_text],
        outputs=[output_text],
    )
    speech_button.click(
        get_audio,
        inputs=[output_text],
        outputs=[output_audio],
    )

demo.launch()