jonathang committed on
Commit 4d47e65
1 Parent(s): 58cd952

Create app.py

Files changed (1)
1. app.py +128 -0
app.py ADDED
@@ -0,0 +1,128 @@
+ import requests
+ import concurrent.futures
+ import librosa
+ import numpy as np
+ import time
+ import functools
+ import soundfile as sf
+ import os
+ import gradio as gr
+ from scipy.io import wavfile
+ from io import BytesIO
+ from typing import Tuple
+
+
+ # Uberduck API credentials read from the environment; passed as an HTTP basic-auth pair.
+ API_KEY = (os.environ["UBERDUCK_USER"], os.environ["UBERDUCK_PASS"])
+ API_URL = "https://api.uberduck.ai"
+
+ def start_synthesis(text, voice):
+     """Kick off an asynchronous synthesis job and return its job uuid."""
+     url = f"{API_URL}/speak"
+     data = {
+         "speech": text,
+         "voice": voice,
+     }
+     response = requests.post(url, auth=API_KEY, json=data)
+     response.raise_for_status()
+     return response.json()["uuid"]
+
+ def check_synthesis_status(uuid):
+     """Poll a synthesis job; the response carries a download path once rendering is done."""
+     url = f"{API_URL}/speak-status?uuid={uuid}"
+     response = requests.get(url, auth=API_KEY)
+     response.raise_for_status()
+     return response.json()
+
+ def download_synthesis(url):
+     """Fetch the rendered audio bytes from the given URL."""
+     response = requests.get(url)
+     response.raise_for_status()
+     return response.content
+
+
+ @functools.cache
+ def download_and_process_speech(text, voice, sr):
+     """Synthesize text with the given voice, wait for it to render, and load it at sr."""
+     uuid = start_synthesis(text, voice)
+
+     # The job is finished once the status response carries a download path.
+     while True:
+         synthesis_status = check_synthesis_status(uuid)
+         url = synthesis_status["path"]
+         if url:
+             break
+         time.sleep(1)
+
+     audio_data = download_synthesis(url)
+     with open(f"{text}.wav", "wb") as f:
+         f.write(audio_data)
+     vocal, _ = librosa.load(f"{text}.wav", sr=sr)
+     return vocal
+
+
+ def place_vocals_on_track(instrumental_file, text_list, voice, name='output', offset=8, time_signature=4):
+     """Beat-track the instrumental and start each vocal line on a measure boundary."""
+     instrumental, sr = librosa.load(instrumental_file)
+     tempo, beat_frames = librosa.beat.beat_track(y=instrumental, sr=sr)
+     beat_times = librosa.frames_to_time(beat_frames, sr=sr)
+     measure_starts = beat_times[::time_signature]
+
+     # Synthesize all vocal lines concurrently, keeping results in order.
+     vocals = [None] * len(text_list)
+     with concurrent.futures.ThreadPoolExecutor() as executor:
+         futures = {}
+         for i, text in enumerate(text_list):
+             if isinstance(voice, dict):
+                 # Lines of the form "voice_name: lyric" choose their voice from the dict.
+                 tvoice, ttext = text.split(':', maxsplit=1)
+                 futures[executor.submit(download_and_process_speech, ttext, voice[tvoice], sr)] = i
+             else:
+                 futures[executor.submit(download_and_process_speech, text, voice, sr)] = i
+         for future in concurrent.futures.as_completed(futures):
+             vocals[futures[future]] = future.result()
+
+     output = np.copy(instrumental)
+
+     for i, vocal in enumerate(vocals):
+         if i + offset < len(measure_starts):
+             start_sample = librosa.time_to_samples(measure_starts[i + offset], sr=sr)
+             # Clamp so a vocal that runs past the end of the track is truncated.
+             end_sample = min(start_sample + len(vocal), len(output))
+             output[start_sample:end_sample] += vocal[:end_sample - start_sample]
+
+     if name is not None:
+         sf.write(name + '.wav', output, sr, 'PCM_24')
+     return sr, output
+
+ def solve(text, beat, offset, time_signature):
+     # Strip commas and drop blank lines and stage directions like "(Chorus)" or "[Verse]".
+     text = text.replace(",", "").splitlines()
+     text = [l for l in text if l.strip() and not l.startswith("(") and not l.startswith('[')]
+     sr, output = place_vocals_on_track(beat, text, "snoop-dogg", name=None, offset=offset, time_signature=time_signature)
+     return sr, output
+
+ def process_and_play(text: str, file: Tuple[int, np.ndarray], offset, time_signature):
+     # Gradio's numpy-typed Audio input arrives as a (sample_rate, samples) tuple;
+     # serialize it to an in-memory WAV so librosa can read it like a file.
+     buffer = BytesIO()
+     wavfile.write(buffer, *file)
+     buffer.seek(0)  # Rewind to the start of the buffer before reading
+     sr, output_wav = solve(text, buffer, offset, time_signature)
+     # Trailing comma: wrap the (sr, data) pair so it maps onto the single Audio output.
+     return (sr, output_wav),
+
+
+ inputs = [
+     gr.inputs.Textbox(label="Input Text"),
+     gr.inputs.Audio(label="Input Audio"),
+     gr.inputs.Number(label="Offset", default=2),  # measures into the track before the first vocal line
+     gr.inputs.Number(label="Time Signature", default=8),  # beats per measure when grouping beats
+ ]
+
+ outputs = [
+     gr.outputs.Audio(label="Processed Audio", type='numpy')
+ ]
+
+ iface = gr.Interface(fn=process_and_play, inputs=inputs, outputs=outputs, title="Text and File Processor")
+ iface.launch()
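
To exercise the pipeline without launching the UI, something like the following should work from a Python session where app.py's definitions are available (a minimal sketch: it assumes UBERDUCK_USER and UBERDUCK_PASS are set, that beat.wav is a local instrumental, and the lyric text and file names here are invented):

    lyrics = "Drop it like it's hot\nBack to the lab again"
    sr, mix = solve(lyrics, "beat.wav", offset=2, time_signature=8)
    sf.write("demo_mix.wav", mix, sr, 'PCM_24')  # solve() is called with name=None, so write the mix ourselves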
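place_vocals_on_track also accepts a dict for its voice argument, in which case each lyric line picks its voice via a "tag: lyric" prefix. A sketch of that mode (the tags and the second voice name are hypothetical; only "snoop-dogg" appears in the code above):

    voices = {"snoop": "snoop-dogg", "rodney": "rodney-dangerfield"}  # second voice name is made up
    lines = ["snoop: first verse line", "rodney: second verse line"]
    sr, mix = place_vocals_on_track("beat.wav", lines, voices)  # default name='output' writes output.wav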