import io
from threading import Thread
import time
import numpy as np
import gradio as gr
import torch
from pydub import AudioSegment
from transformers import AutoTokenizer, AutoFeatureExtractor, set_seed
from parler_tts import ParlerTTSForConditionalGeneration
from streamer import ParlerTTSStreamer
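# NOTE: `streamer` is assumed to be a local module in this Space (streamer.py)
# providing ParlerTTSStreamer, a transformers-style streamer that yields decoded
# audio chunks while the model is still generating.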
# Device and model setup
device = "cuda:0" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
torch_dtype = torch.float16 if device != "cpu" else torch.float32
repo_id = "parler-tts/parler_tts_mini_v0.1"
jenny_repo_id = "ylacombe/parler-tts-mini-jenny-30H"
model = ParlerTTSForConditionalGeneration.from_pretrained(
    jenny_repo_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True
).to(device)
tokenizer = AutoTokenizer.from_pretrained(repo_id)
feature_extractor = AutoFeatureExtractor.from_pretrained(repo_id)
SAMPLE_RATE = feature_extractor.sampling_rate
SEED = 42
frame_rate = model.audio_encoder.config.frame_rate
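# frame_rate is the audio codec's frames-per-second; multiplying it by a target
# duration in seconds gives the number of frames to buffer before the streamer
# emits a chunk (see play_steps below).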
# Helper to convert audio to MP3
def numpy_to_mp3(audio_array, sampling_rate):
    # Scale floating-point audio into the 16-bit integer range pydub expects
    if np.issubdtype(audio_array.dtype, np.floating):
        max_val = np.max(np.abs(audio_array))
        audio_array = (audio_array / max_val) * 32767
        audio_array = audio_array.astype(np.int16)
    audio_segment = AudioSegment(
        audio_array.tobytes(),
        frame_rate=sampling_rate,
        sample_width=audio_array.dtype.itemsize,
        channels=1,
    )
    # Encode the segment to MP3 in memory and return the raw bytes
    mp3_io = io.BytesIO()
    audio_segment.export(mp3_io, format="mp3", bitrate="320k")
    return mp3_io.getvalue()
# TTS function that streams audio for a fixed text prompt
def speak_fixed_text():
    text = "This is a demo of Parler-TTS generating a voice from fixed text input."
    description = "A calm, clear female voice speaking in a natural tone."
    description_tokens = tokenizer(description, return_tensors="pt").to(device)
    prompt = tokenizer(text, return_tensors="pt").to(device)
    # Emit roughly two seconds of audio per streamed chunk
    play_steps = int(frame_rate * 2.0)
    streamer = ParlerTTSStreamer(model, device=device, play_steps=play_steps)
    generation_kwargs = dict(
        input_ids=description_tokens.input_ids,
        prompt_input_ids=prompt.input_ids,
        streamer=streamer,
        do_sample=True,
        temperature=1.0,
        min_new_tokens=10,
    )
    set_seed(SEED)
    # Run generation in a background thread so chunks can be consumed as they arrive
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()
    start = time.time()
    for new_audio in streamer:
        print(f"Audio chunk: {round(new_audio.shape[0] / SAMPLE_RATE, 2)} sec (elapsed {round(time.time() - start, 2)} sec)")
        # Yield each chunk so Gradio can stream it; returning here would stop after the first chunk
        yield numpy_to_mp3(new_audio, sampling_rate=SAMPLE_RATE)
# Minimal Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("## 🔊 Text-to-Speech Demo")
    output_audio = gr.Audio(label="Generated Audio", streaming=True, autoplay=True)
    generate_btn = gr.Button("Generate Voice")
    generate_btn.click(fn=speak_fixed_text, outputs=output_audio)
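# speak_fixed_text is a generator, so streaming playback relies on Gradio's queue.
# Recent Gradio versions enable the queue by default; on older versions you may
# need demo.queue().launch() instead of demo.launch().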
demo.launch()