# Hugging Face Spaces demo: SpeechMatrix speech-to-speech translation.
import os
import subprocess
import sys

# HF Spaces runtime hack: pin the gradio version before importing it.
# subprocess.run with an argument list (shell=False) replaces the old
# os.system shell string, and sys.executable guarantees we install into
# the interpreter actually running this script. check=False keeps the
# original best-effort behavior (os.system's exit code was ignored too).
subprocess.run(
    [sys.executable, "-m", "pip", "install", "gradio==3.3"],
    check=False,
)

import gradio as gr
import numpy as np
import streamlit as st

from audio_pipe import SpeechToSpeechPipeline
# Text shown on the demo page.
title = "SpeechMatrix Speech-to-speech Translation"
description = "Gradio Demo for SpeechMatrix. To use it, simply record your audio, or click the example to load. Read more at the links below. \nNote: These models are trained on SpeechMatrix data only, and meant to serve as a baseline for future research."
article = "<p style='text-align: center'><a href='https://research.facebook.com/publications/speechmatrix' target='_blank'>SpeechMatrix</a> | <a href='https://github.com/facebookresearch/fairseq/tree/ust' target='_blank'>Github Repo</a></p>"

# Source languages. 'it' is deliberately excluded — its checkpoints fail to
# load, see the size-mismatch note below.
# SRC_LIST = ['cs', 'de', 'en', 'es', 'et', 'fi', 'fr', 'hr', 'hu', 'it', 'nl', 'pl', 'pt', 'ro', 'sk', 'sl']
SRC_LIST = ['cs', 'de', 'en', 'es', 'et', 'fi', 'fr', 'hr', 'hu', 'nl', 'pl', 'pt', 'ro', 'sk', 'sl']
TGT_LIST = ['en', 'fr', 'es']

# One multilingual any-to-English model, plus every directional
# source->target "textless" model (skipping same-language pairs).
MODEL_LIST = ['xm_transformer_sm_all-en'] + [
    f"textless_sm_{src}_{tgt}"
    for src in SRC_LIST
    for tgt in TGT_LIST
    if src != tgt
]
# for textless_sm_it_fr, textless_sm_it_es
# size mismatch for source_unit_decoder.embed_tokens.weight: copying a param with shape torch.Size([804, 256]) from checkpoint, the shape in current model is torch.Size([1004, 256]).
# size mismatch for source_unit_decoder.output_projection.weight: copying a param with shape torch.Size([804, 256]) from checkpoint, the shape in current model is torch.Size([1004, 256]).
# No example clips are bundled with the demo.
examples = []

# Eagerly instantiate one speech-to-speech pipeline per checkpoint so the
# UI can dispatch by model name at inference time.
# io_dict = {model: gr.Interface.load(f"huggingface/facebook/{model}", api_key=st.secrets["api_key"]) for model in MODEL_LIST}
pipe_dict = {}
for model_name in MODEL_LIST:
    print(f"model: {model_name}")
    pipe_dict[model_name] = SpeechToSpeechPipeline(f"facebook/{model_name}")
def inference(audio, model):
    """Translate recorded speech with the selected model.

    Args:
        audio: Filepath of the recorded input audio (the Audio input
            component uses ``type="filepath"``).
        model: Name of the model to run; key into the module-level
            ``pipe_dict``.

    Returns:
        The ``["value"]["name"]`` entry of the pipeline result's config —
        presumably the path of the generated output audio file (TODO:
        confirm against SpeechToSpeechPipeline).
    """
    result = pipe_dict[model](audio)
    return result.get_config()["value"]["name"]
# Assemble and serve the demo (gradio 3.x `gr.inputs`/`gr.outputs` API).
audio_input = gr.inputs.Audio(source="microphone", type="filepath", label="Input")
model_dropdown = gr.inputs.Dropdown(
    choices=MODEL_LIST,
    default="xm_transformer_sm_all-en",
    type="value",
    label="Model",
)
demo = gr.Interface(
    inference,
    [audio_input, model_dropdown],
    gr.outputs.Audio(label="Output"),
    article=article,
    title=title,
    examples=examples,
    cache_examples=False,
    description=description,
)
demo.queue().launch()