juancopi81 committed on
Commit
3e79bbd
1 Parent(s): 27bce5c
Files changed (2)
  1. app.py +76 -13
  2. spectro.py +183 -0
app.py CHANGED
@@ -1,6 +1,20 @@
- import gradio as gr
  import random
+ from PIL import Image
+ from spectro import wav_bytes_from_spectrogram_image
+ from diffusers import StableDiffusionPipeline
+ import gradio as gr
+ import torch
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ dtype = torch.float16 if device == "cuda" else torch.float32
 
+ model_id = "runwayml/stable-diffusion-v1-5"
+ pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=dtype)
+ pipe = pipe.to(device)
+
+ model_id2 = "riffusion/riffusion-model-v1"
+ pipe2 = StableDiffusionPipeline.from_pretrained(model_id2, torch_dtype=dtype)
+ pipe2 = pipe2.to(device)
 
  COLORS = [
      ["#ff0000", "#00ff00"],
@@ -8,20 +22,69 @@ COLORS = [
      ["#0000ff", "#ff0000"],
  ]
 
- def audio_waveform(audio, image):
+ title = """
+ <div style="text-align: center; max-width: 650px; margin: 0 auto 10px;">
+   <div style="display: inline-flex; align-items: center; gap: 0.8rem; font-size: 1.75rem;">
+     <h1 style="font-weight: bold; margin-bottom: 7px; color: #000;">Riffusion and Stable Diffusion</h1>
+   </div>
+   <p style="margin-bottom: 10px; font-size: 98%; color: #666;">Text-to-music player.</p>
+ </div>
+ """
+
+ def get_bg_image(prompt):
+     images = pipe(prompt)
+     print("Image generated!")
+     image_output = images.images[0] if not images.nsfw_content_detected[0] else Image.open("nsfw_placeholder.jpg")
+     return image_output
+
+ def get_music(prompt):
+     spec = pipe2(prompt).images[0]
+     print(spec)
+     wav = wav_bytes_from_spectrogram_image(spec)
+     with open("output.wav", "wb") as f:
+         f.write(wav[0].getbuffer())
+     return "output.wav"
+
+ def infer(prompt):
+     image = get_bg_image(prompt)
+     audio = get_music(prompt)
      return (
-         audio,
-         gr.make_waveform(audio),
          gr.make_waveform(audio, bg_image=image, bars_color=random.choice(COLORS)),
      )
 
+ css = """
+ #col-container {max-width: 700px; margin-left: auto; margin-right: auto;}
+ #prompt-in {
+     border: 2px solid #666;
+     border-radius: 2px;
+     padding: 8px;
+ }
+ #btn-container {
+     display: flex;
+     align-items: center;
+     justify-content: center;
+     width: calc(15% - 16px);
+     height: calc(15% - 16px);
+ }
+ /* Style the submit button */
+ #submit-btn {
+     background-color: #382a1d;
+     color: #fff;
+     border: 1px solid #000;
+     border-radius: 4px;
+     padding: 8px;
+     font-size: 16px;
+     cursor: pointer;
+ }
+ """
+
+ with gr.Blocks(css=css) as demo:
+     gr.HTML(title)
+     with gr.Column(elem_id="col-container"):
+         prompt_input = gr.Textbox(placeholder="a cat diva singing in a New York jazz club",
+                                   elem_id="prompt-in",
+                                   show_label=False)
+         with gr.Row(elem_id="btn-container"):
+             send_btn = gr.Button(value="Send", elem_id="submit-btn")
+         video_output = gr.Video()
+         send_btn.click(infer, inputs=[prompt_input], outputs=video_output)
 
- gr.Interface(
-     audio_waveform,
-     inputs=[gr.Audio(), gr.Image(type="filepath")],
-     outputs=[
-         gr.Audio(),
-         gr.Video(),
-         gr.Video(),
-     ],
- ).launch()
+ demo.queue().launch(debug=True)
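
The committed app.py only runs inside Gradio, but the generation path is easy to smoke-test headlessly. A minimal sketch under stated assumptions: spectro.py (added below) is importable from the working directory, and the script name, prompt, and output filename are illustrative, not part of the commit.

# smoke_test.py -- hypothetical script mirroring get_music() above.
import torch
from diffusers import StableDiffusionPipeline

from spectro import wav_bytes_from_spectrogram_image

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

# Same riffusion checkpoint that app.py loads as pipe2.
pipe = StableDiffusionPipeline.from_pretrained(
    "riffusion/riffusion-model-v1", torch_dtype=dtype
).to(device)

# The checkpoint renders a text prompt as a 512x512 spectrogram image.
spec = pipe("acoustic folk guitar, warm and slow").images[0]
wav_bytes, duration_s = wav_bytes_from_spectrogram_image(spec)
with open("output.wav", "wb") as f:
    f.write(wav_bytes.getbuffer())
print(f"Wrote output.wav ({duration_s:.1f} s)")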
 
 
 
 
 
 
 
 
spectro.py ADDED
@@ -0,0 +1,183 @@
+ """
+ Audio processing tools to convert between spectrogram images and waveforms.
+ """
+ import io
+ import typing as T
+
+ import numpy as np
+ from PIL import Image
+ import pydub
+ from scipy.io import wavfile
+ import torch
+ import torchaudio
+
+
+ def wav_bytes_from_spectrogram_image(image: Image.Image) -> T.Tuple[io.BytesIO, float]:
+     """
+     Reconstruct a WAV audio clip from a spectrogram image. Also returns the duration in seconds.
+     """
+     max_volume = 50
+     power_for_image = 0.25
+     Sxx = spectrogram_from_image(image, max_volume=max_volume, power_for_image=power_for_image)
+
+     sample_rate = 44100  # [Hz]
+     clip_duration_ms = 5000  # [ms]
+
+     bins_per_image = 512
+     n_mels = 512
+
+     # FFT parameters
+     window_duration_ms = 100  # [ms]
+     padded_duration_ms = 400  # [ms]
+     step_size_ms = 10  # [ms]
+
+     # Derived parameters
+     num_samples = int(image.width / float(bins_per_image) * clip_duration_ms) * sample_rate
+     n_fft = int(padded_duration_ms / 1000.0 * sample_rate)
+     hop_length = int(step_size_ms / 1000.0 * sample_rate)
+     win_length = int(window_duration_ms / 1000.0 * sample_rate)
+
+     samples = waveform_from_spectrogram(
+         Sxx=Sxx,
+         n_fft=n_fft,
+         hop_length=hop_length,
+         win_length=win_length,
+         num_samples=num_samples,
+         sample_rate=sample_rate,
+         mel_scale=True,
+         n_mels=n_mels,
+         max_mel_iters=200,
+         num_griffin_lim_iters=32,
+     )
+
+     wav_bytes = io.BytesIO()
+     wavfile.write(wav_bytes, sample_rate, samples.astype(np.int16))
+     wav_bytes.seek(0)
+
+     duration_s = float(len(samples)) / sample_rate
+
+     return wav_bytes, duration_s
+
+
+ def spectrogram_from_image(
+     image: Image.Image, max_volume: float = 50, power_for_image: float = 0.25
+ ) -> np.ndarray:
+     """
+     Compute a spectrogram magnitude array from a spectrogram image.
+     TODO(hayk): Add image_from_spectrogram and call this out as the reverse.
+     """
+     # Convert to a numpy array of floats
+     data = np.array(image).astype(np.float32)
+
+     # Flip Y and take a single channel
+     data = data[::-1, :, 0]
+
+     # Invert
+     data = 255 - data
+
+     # Rescale to max volume
+     data = data * max_volume / 255
+
+     # Reverse the power curve
+     data = np.power(data, 1 / power_for_image)
+
+     return data
+
+
+ def spectrogram_from_waveform(
+     waveform: np.ndarray,
+     sample_rate: int,
+     n_fft: int,
+     hop_length: int,
+     win_length: int,
+     mel_scale: bool = True,
+     n_mels: int = 512,
+ ) -> np.ndarray:
+     """
+     Compute a spectrogram from a waveform.
+     """
+     spectrogram_func = torchaudio.transforms.Spectrogram(
+         n_fft=n_fft,
+         power=None,
+         hop_length=hop_length,
+         win_length=win_length,
+     )
+
+     waveform_tensor = torch.from_numpy(waveform.astype(np.float32)).reshape(1, -1)
+     Sxx_complex = spectrogram_func(waveform_tensor).numpy()[0]
+
+     Sxx_mag = np.abs(Sxx_complex)
+
+     if mel_scale:
+         mel_scaler = torchaudio.transforms.MelScale(
+             n_mels=n_mels,
+             sample_rate=sample_rate,
+             f_min=0,
+             f_max=10000,
+             n_stft=n_fft // 2 + 1,
+             norm=None,
+             mel_scale="htk",
+         )
+
+         Sxx_mag = mel_scaler(torch.from_numpy(Sxx_mag)).numpy()
+
+     return Sxx_mag
+
+
+ def waveform_from_spectrogram(
+     Sxx: np.ndarray,
+     n_fft: int,
+     hop_length: int,
+     win_length: int,
+     num_samples: int,
+     sample_rate: int,
+     mel_scale: bool = True,
+     n_mels: int = 512,
+     max_mel_iters: int = 200,
+     num_griffin_lim_iters: int = 32,
+     device: str = "cuda:0" if torch.cuda.is_available() else "cpu",
+ ) -> np.ndarray:
+     """
+     Reconstruct a waveform from a spectrogram.
+     This is an approximate inverse of spectrogram_from_waveform, using the Griffin-Lim algorithm
+     to approximate the phase.
+     """
+     Sxx_torch = torch.from_numpy(Sxx).to(device)
+
+     # TODO(hayk): Make this a class that caches the two things
+
+     if mel_scale:
+         mel_inv_scaler = torchaudio.transforms.InverseMelScale(
+             n_mels=n_mels,
+             sample_rate=sample_rate,
+             f_min=0,
+             f_max=10000,
+             n_stft=n_fft // 2 + 1,
+             norm=None,
+             mel_scale="htk",
+             max_iter=max_mel_iters,
+         ).to(device)
+
+         Sxx_torch = mel_inv_scaler(Sxx_torch)
+
+     griffin_lim = torchaudio.transforms.GriffinLim(
+         n_fft=n_fft,
+         win_length=win_length,
+         hop_length=hop_length,
+         power=1.0,
+         n_iter=num_griffin_lim_iters,
+     ).to(device)
+
+     waveform = griffin_lim(Sxx_torch).cpu().numpy()
+
+     return waveform
+
+
+ def mp3_bytes_from_wav_bytes(wav_bytes: io.BytesIO) -> io.BytesIO:
+     mp3_bytes = io.BytesIO()
+     sound = pydub.AudioSegment.from_wav(wav_bytes)
+     sound.export(mp3_bytes, format="mp3")
+     mp3_bytes.seek(0)
+     return mp3_bytes
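
The module's last helper hands the in-memory WAV to pydub for MP3 export, which requires ffmpeg on the PATH. A short usage sketch; the input and output filenames are illustrative, not part of the commit:

# Hypothetical round trip: spectrogram image -> WAV bytes -> MP3 file.
from PIL import Image

from spectro import mp3_bytes_from_wav_bytes, wav_bytes_from_spectrogram_image

image = Image.open("spectrogram.png")  # e.g. a saved riffusion output
wav_bytes, duration_s = wav_bytes_from_spectrogram_image(image)
mp3_bytes = mp3_bytes_from_wav_bytes(wav_bytes)
with open("clip.mp3", "wb") as f:
    f.write(mp3_bytes.getbuffer())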
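
The docstring of spectrogram_from_image leaves image_from_spectrogram as a TODO. A sketch of that inverse, undoing the four steps in reverse order (not part of this commit, so treat it as an assumption about the intended encoding):

import numpy as np
from PIL import Image

def image_from_spectrogram(
    Sxx: np.ndarray, max_volume: float = 50, power_for_image: float = 0.25
) -> Image.Image:
    """Hypothetical reverse of spectrogram_from_image."""
    data = np.power(Sxx, power_for_image)  # re-apply the power curve
    data = data * 255 / max_volume         # rescale to 0-255
    data = 255 - data                      # invert so loud bins are dark
    data = data[::-1, :]                   # flip Y back
    data = np.clip(data, 0, 255).astype(np.uint8)
    return Image.fromarray(data).convert("RGB")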