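"""Gradio demo: text-to-speech with suno/bark-small.

The user enters text, picks a language ("en" or "fr") and a speaker number,
and the app returns the generated speech as a WAV file.
"""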
import gradio as gr
import torch
from transformers import AutoProcessor, BarkModel
import scipy.io.wavfile

# Optional GPU path (uncomment to run the half-precision model with CPU offload):
# device = "cuda" if torch.cuda.is_available() else "cpu"
# model = BarkModel.from_pretrained("suno/bark-small", torch_dtype=torch.float16).to(device)
# model.enable_cpu_offload()

device = "cpu"
processor = AutoProcessor.from_pretrained("suno/bark-small")
model = BarkModel.from_pretrained("suno/bark-small").to(device)
num_list = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]  # Bark voice presets are numbered 0-9
lang_list = ["en", "fr"]
def run_bark(text, n, lang):
    # Bark voice presets follow the pattern "v2/{lang}_speaker_{n}", with n in 0-9
    voice_preset = f"v2/{lang}_speaker_{int(n)}"
    # Example input: "Hello, my name is Suno. And, uh - and I like pizza. [laughs]
    # But I also have other interests such as playing tic tac toe."
    inputs = processor(text=text,
                       voice_preset=voice_preset,
                       return_tensors="pt",
                       )
    speech_values = model.generate(**inputs, do_sample=True)
    # Bark's generation config carries the output sample rate (24 kHz)
    sampling_rate = model.generation_config.sample_rate
    scipy.io.wavfile.write("bark_out.wav", rate=sampling_rate, data=speech_values.cpu().numpy().squeeze())
    return "bark_out.wav"
with gr.Blocks() as app:
    with gr.Column():
        in_text = gr.Textbox()
        with gr.Row():
            speaker_num = gr.Dropdown(label="Speaker Voice", choices=num_list, value="0")
            speaker_lang = gr.Dropdown(label="Speaker Language", choices=lang_list, value="en")
        go_btn = gr.Button()
    with gr.Column():
        out_audio = gr.Audio()

    go_btn.click(run_bark, [in_text, speaker_num, speaker_lang], out_audio)

app.launch()
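# To run locally (assumed environment; dependencies inferred from the imports above):
#   pip install gradio torch transformers scipy
#   python app.py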