kevinwang676 committed on
Commit 25fce83
Parent(s): 5af09d3

Update app_multi.py

Files changed (1)
  1. app_multi.py +380 -26
app_multi.py CHANGED
@@ -1,21 +1,34 @@
 from typing import Union
 
 from argparse import ArgumentParser
+from pathlib import Path
+import subprocess
+import librosa
+import os
+import time
+import random
+
+import matplotlib.pyplot as plt
+import numpy as np
+from PIL import Image, ImageDraw, ImageFont
+from moviepy.editor import *
+from moviepy.video.io.VideoFileClip import VideoFileClip
 
 import asyncio
 import json
 import hashlib
 from os import path, getenv
+from pydub import AudioSegment
 
 import gradio as gr
 
 import torch
 
-import numpy as np
-import librosa
-
 import edge_tts
 
+from datetime import datetime
+from scipy.io.wavfile import write
+
 import config
 import util
 from infer_pack.models import (
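The new imports pull in a stack of heavy media dependencies (moviepy, pydub, Pillow, matplotlib, scipy); demucs and yt-dlp are invoked as external commands later, so they never appear here. A minimal sketch, not part of the commit, of a guard that would fail at startup with a readable message instead of crashing mid-request:

```python
# Sketch only: verify the new media dependencies resolve before the app builds.
# The module names mirror the imports added above.
import importlib

for mod in ('moviepy.editor', 'pydub', 'PIL', 'matplotlib', 'scipy.io.wavfile'):
    try:
        importlib.import_module(mod)
    except ImportError as err:
        raise SystemExit(f'app_multi.py is missing a dependency: {mod} ({err})')
```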
@@ -27,6 +40,8 @@ from vc_infer_pipeline import VC
 # Reference: https://huggingface.co/spaces/zomehwh/rvc-models/blob/main/app.py#L21  # noqa
 in_hf_space = getenv('SYSTEM') == 'spaces'
 
+high_quality = True
+
 # Argument parsing
 arg_parser = ArgumentParser()
 arg_parser.add_argument(
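`high_quality` is a module-level switch read by `audio_separated` below: `True` keeps the whole separation path in lossless WAV, `False` sends 128 kbps MP3 through demucs instead. In isolation the switch amounts to this sketch (not in the commit):

```python
# Sketch of the format switch audio_separated() performs on high_quality.
fmt = 'wav' if high_quality else 'mp3'
extra_flags = '' if high_quality else '--mp3 --mp3-bitrate 128'
print(f'demucs will read track.{fmt}', extra_flags)
```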
@@ -127,7 +142,303 @@ print(f'Models loaded: {len(loaded_models)}')
 # Edge TTS speakers
 tts_speakers_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices())  # noqa
 
+# Make MV
+def make_bars_image(height_values, index, new_height):
+
+    # Define the size of the image
+    width = 512
+    height = new_height
+
+    # Create a new image with a transparent background
+    image = Image.new('RGBA', (width, height), color=(0, 0, 0, 0))
+
+    # Get the image drawing context
+    draw = ImageDraw.Draw(image)
+
+    # Define the rectangle width and spacing
+    rect_width = 2
+    spacing = 2
+
+    # Define the list of height values for the rectangles
+    # height_values = [20, 40, 60, 80, 100, 80, 60, 40]
+    num_bars = len(height_values)
+    # Calculate the total width of the rectangles and the spacing
+    total_width = num_bars * rect_width + (num_bars - 1) * spacing
+
+    # Calculate the starting position for the first rectangle
+    start_x = int((width - total_width) / 2)
+    # Define the buffer size
+    buffer_size = 80
+    # Draw the rectangles from left to right
+    x = start_x
+    for i, height in enumerate(height_values):
+
+        # Define the rectangle coordinates
+        y0 = buffer_size
+        y1 = height + buffer_size
+        x0 = x
+        x1 = x + rect_width
+
+        # Draw the rectangle
+        draw.rectangle([x0, y0, x1, y1], fill='white')
+
+        # Move to the next rectangle position
+        if i < num_bars - 1:
+            x += rect_width + spacing
+
+    # Rotate the image by 180 degrees
+    image = image.rotate(180)
+
+    # Mirror the image
+    image = image.transpose(Image.FLIP_LEFT_RIGHT)
+
+    # Save the image
+    image.save('audio_bars_' + str(index) + '.png')
+
+    return 'audio_bars_' + str(index) + '.png'
+
+def db_to_height(db_value):
+    # Scale the dB value to a range between 0 and 1
+    scaled_value = (db_value + 80) / 80
+
+    # Convert the scaled value to a height between 0 and 50
+    height = scaled_value * 50
+
+    return height
+
+def infer(title, audio_in, image_in):
+    # Load the audio file
+    audio_path = audio_in
+    audio_data, sr = librosa.load(audio_path)
+
+    # Get the duration in seconds
+    duration = librosa.get_duration(y=audio_data, sr=sr)
+
+    # Extract the audio data for the desired time
+    start_time = 0  # start time in seconds
+    end_time = duration  # end time in seconds
+
+    start_index = int(start_time * sr)
+    end_index = int(end_time * sr)
+
+    audio_data = audio_data[start_index:end_index]
+
+    # Compute the short-time Fourier transform
+    hop_length = 512
+
+    stft = librosa.stft(audio_data, hop_length=hop_length)
+    spectrogram = librosa.amplitude_to_db(np.abs(stft), ref=np.max)
+
+    # Get the frequency values
+    freqs = librosa.fft_frequencies(sr=sr, n_fft=stft.shape[0])
+
+    # Select the indices of the frequency values that correspond to the desired frequencies
+    n_freqs = 114
+    freq_indices = np.linspace(0, len(freqs) - 1, n_freqs, dtype=int)
+
+    # Extract the dB values for the desired frequencies
+    db_values = []
+    for i in range(spectrogram.shape[1]):
+        db_values.append(list(zip(freqs[freq_indices], spectrogram[freq_indices, i])))
+
+    # Print the dB values for the first time frame
+    print(db_values[0])
+
+    proportional_values = []
+
+    for frame in db_values:
+        proportional_frame = [db_to_height(db) for f, db in frame]
+        proportional_values.append(proportional_frame)
+
+    print(proportional_values[0])
+    print("AUDIO CHUNK: " + str(len(proportional_values)))
+
+    # Open the background image
+    background_image = Image.open(image_in)
+
+    # Resize the image while keeping its aspect ratio
+    bg_width, bg_height = background_image.size
+    aspect_ratio = bg_width / bg_height
+    new_width = 512
+    new_height = int(new_width / aspect_ratio)
+    resized_bg = background_image.resize((new_width, new_height))
+
+    # Apply a black mask (black_cache.png) for better visibility of the white text
+    bg_cache = Image.open('black_cache.png')
+    resized_bg.paste(bg_cache, (0, resized_bg.height - bg_cache.height), mask=bg_cache)
+
+    # Create a new ImageDraw object
+    draw = ImageDraw.Draw(resized_bg)
+
+    # Define the text to be added
+    text = title
+    font = ImageFont.truetype("NotoSansSC-Regular.otf", 16)
+    text_color = (255, 255, 255)  # white
+
+    # Calculate the position of the text
+    text_width, text_height = draw.textsize(text, font=font)
+    x = 30
+    y = new_height - 70
+
+    # Draw the text on the image
+    draw.text((x, y), text, fill=text_color, font=font)
+
+    # Save the resized image
+    resized_bg.save('resized_background.jpg')
+
+    generated_frames = []
+    for i, frame in enumerate(proportional_values):
+        bars_img = make_bars_image(frame, i, new_height)
+        bars_img = Image.open(bars_img)
+        # Paste the audio bars image on top of the background image
+        fresh_bg = Image.open('resized_background.jpg')
+        fresh_bg.paste(bars_img, (0, 0), mask=bars_img)
+        # Save the image
+        fresh_bg.save('audio_bars_with_bg' + str(i) + '.jpg')
+        generated_frames.append('audio_bars_with_bg' + str(i) + '.jpg')
+    print(generated_frames)
+
+    # Create a video clip from the images
+    clip = ImageSequenceClip(generated_frames, fps=len(generated_frames) / (end_time - start_time))
+    audio_clip = AudioFileClip(audio_in)
+    clip = clip.set_audio(audio_clip)
+    # Set the output codec
+    codec = 'libx264'
+    audio_codec = 'aac'
+    # Save the video to a file
+    clip.write_videofile("my_video.mp4", codec=codec, audio_codec=audio_codec)
+
+    retimed_clip = VideoFileClip("my_video.mp4")
+
+    # Set the desired frame rate
+    new_fps = 25
+
+    # Create a new clip with the new frame rate
+    new_clip = retimed_clip.set_fps(new_fps)
+
+    # Save the new clip as a new video file
+    new_clip.write_videofile("my_video_retimed.mp4", codec=codec, audio_codec=audio_codec)
+
+    return "my_video_retimed.mp4"
+
+# Mix vocal and non-vocal tracks
+def mix(audio1, audio2):
+    sound1 = AudioSegment.from_file(audio1)
+    sound2 = AudioSegment.from_file(audio2)
+    length = len(sound1)
+    mixed = sound1[:length].overlay(sound2)
+
+    mixed.export("song.wav", format="wav")
+
+    return "song.wav"
+
+# Bilibili (fetched with yt-dlp)
+def youtube_downloader(
+    video_identifier,
+    start_time,
+    end_time,
+    output_filename="track.wav",
+    num_attempts=5,
+    url_base="",
+    quiet=False,
+    force=True,
+):
+    output_path = Path(output_filename)
+    if output_path.exists():
+        if not force:
+            return output_path
+        else:
+            output_path.unlink()
+
+    quiet = "--quiet --no-warnings" if quiet else ""
+    command = f"""
+        yt-dlp {quiet} -x --audio-format wav -f bestaudio -o "{output_filename}" --download-sections "*{start_time}-{end_time}" "{url_base}{video_identifier}"  # noqa: E501
+    """.strip()
+
+    attempts = 0
+    while True:
+        try:
+            _ = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT)
+        except subprocess.CalledProcessError:
+            attempts += 1
+            if attempts == num_attempts:
+                return None
+        else:
+            break
+
+    if output_path.exists():
+        return output_path
+    else:
+        return None
+
+def audio_separated(audio_input, progress=gr.Progress()):
+    # Start progress
+    progress(progress=0, desc="Starting...")
+    time.sleep(0.1)
+
+    # Check the file input
+    if audio_input is None:
+        # Show progress
+        for i in progress.tqdm(range(100), desc="Please wait..."):
+            time.sleep(0.01)
+
+        return (None, None, 'Please input audio.')
+
+    # Create a filename
+    filename = str(random.randint(10000, 99999)) + datetime.now().strftime("%d%m%Y%H%M%S")
+
+    # Progress
+    progress(progress=0.10, desc="Please wait...")
+
+    # Make the output dir
+    os.makedirs("output", exist_ok=True)
+
+    # Progress
+    progress(progress=0.20, desc="Please wait...")
+
+    # Write the input to disk
+    if high_quality:
+        write(filename + ".wav", audio_input[0], audio_input[1])
+    else:
+        write(filename + ".mp3", audio_input[0], audio_input[1])
+
+    # Progress
+    progress(progress=0.50, desc="Please wait...")
+
+    # Demucs process
+    if high_quality:
+        command_demucs = "python3 -m demucs --two-stems=vocals -d cpu " + filename + ".wav -o output"
+    else:
+        command_demucs = "python3 -m demucs --two-stems=vocals --mp3 --mp3-bitrate 128 -d cpu " + filename + ".mp3 -o output"
+
+    os.system(command_demucs)
+
+    # Progress
+    progress(progress=0.70, desc="Please wait...")
+
+    # Remove the temporary audio file
+    if high_quality:
+        command_delete = "rm -v ./" + filename + ".wav"
+    else:
+        command_delete = "rm -v ./" + filename + ".mp3"
+
+    os.system(command_delete)
+
+    # Progress
+    progress(progress=0.80, desc="Please wait...")
+
+    # Progress
+    for i in progress.tqdm(range(80, 100), desc="Please wait..."):
+        time.sleep(0.1)
+
+    if high_quality:
+        return "./output/htdemucs/" + filename + "/vocals.wav", "./output/htdemucs/" + filename + "/no_vocals.wav", "Successfully..."
+    else:
+        return "./output/htdemucs/" + filename + "/vocals.mp3", "./output/htdemucs/" + filename + "/no_vocals.mp3", "Successfully..."
+
+
 # https://github.com/fumiama/Retrieval-based-Voice-Conversion-WebUI/blob/main/infer-web.py#L118  # noqa
 def vc_func(
     input_audio, model_index, pitch_adjust, f0_method, feat_ratio,
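Outside the UI, the new helpers compose in the same order the buttons fire. A rough sketch, assuming yt-dlp and demucs are installed and that `black_cache.png` and `NotoSansSC-Regular.otf` sit in the working directory as `infer` expects; the converted-vocals path is a hypothetical stand-in for `vc_func` output, and `audio_separated` itself wants Gradio's `(rate, array)` tuple, so the demucs call is made directly here:

```python
import subprocess

# 1. Grab a 15-second slice of audio from a (hypothetical) Bilibili video.
track = youtube_downloader('BV1xx411c7XX', start_time=0, end_time=15)
if track is None:
    raise SystemExit('download failed after retries')

# 2. Two-stem split; the same command audio_separated() builds with high_quality=True.
subprocess.check_call(
    ['python3', '-m', 'demucs', '--two-stems=vocals', '-d', 'cpu', str(track), '-o', 'output']
)
stems = 'output/htdemucs/track'  # demucs names the folder after the input file

# 3. After vc_func() has produced converted vocals (stand-in path here),
#    overlay them on the instrumental and render the music video.
song = mix('converted_vocals.wav', f'{stems}/no_vocals.wav')  # writes song.wav
video = infer('My cover', song, 'background.jpg')             # writes my_video_retimed.mp4
```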
@@ -146,8 +457,8 @@ def vc_func(
 
     # https://huggingface.co/spaces/zomehwh/rvc-models/blob/main/app.py#L49
     # Can be changed as well, we will see
-    if (audio_npy.shape[0] / audio_samp) > 320 and in_hf_space:
-        return (None, 'Input audio is longer than 60 secs.')
+    if (audio_npy.shape[0] / audio_samp) > 600 and in_hf_space:
+        return (None, 'Input audio is longer than 600 secs.')
 
     # Bloody hell: https://stackoverflow.com/questions/26921836/
     if audio_npy.dtype != np.float32:  # :thonk:
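The guard is plain duration arithmetic: `shape[0]` is the sample count, so dividing by the sample rate gives seconds. The ceiling on Spaces moves from 320 seconds (with a message that claimed 60) to a consistent 600. For example:

```python
# samples / sample_rate = duration in seconds
sample_rate = 48_000             # Hz
print(30_000_000 / sample_rate)  # 625.0 s -> rejected on Spaces (> 600)
print(9_600_000 / sample_rate)   # 200.0 s -> accepted
```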
@@ -293,22 +604,42 @@ async def _example_edge_tts(
 
 
 with app:
-    gr.Markdown(
-        '## A simplistic Web interface\n'
-        'RVC interface, project based on [RVC-WebUI](https://github.com/fumiama/Retrieval-based-Voice-Conversion-WebUI)'  # thx noqa
-        'A lot of inspiration from what\'s already out there, including [zomehwh/rvc-models](https://huggingface.co/spaces/zomehwh/rvc-models) & [DJQmUKV/rvc-inference](https://huggingface.co/spaces/DJQmUKV/rvc-inference).\n '  # thx noqa
-    )
-
+    gr.HTML("<center>"
+            "<h1>🥳🎶🎡 - AI Singer: RVC Singing Voice Conversion + AI Voice Changer</h1>"
+            "</center>")
+    gr.Markdown("### <center>🦄 - Automatically extracts the audio track from a video and removes the backing track; Powered by [RVC-Project](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI)</center>")
+    gr.Markdown("### <center>For more great apps, follow [滔滔AI](http://www.talktalkai.com); 滔滔AI, surging with love! 💕</center>")
+
+    with gr.Tab("🤗 - Extract the audio from a Bilibili video"):
+        with gr.Row():
+            with gr.Column():
+                ydl_url_input = gr.Textbox(label="Bilibili video URL (the BV number alone also works)", value="https://www.bilibili.com/video/BV...")
+                start = gr.Number(value=0, label="Start time (seconds)")
+                end = gr.Number(value=15, label="End time (seconds)")
+                ydl_url_submit = gr.Button("Extract the audio", variant="primary")
+                as_audio_submit = gr.Button("Remove the backing track", variant="primary")
+            with gr.Column():
+                ydl_audio_output = gr.Audio(label="Audio from Bilibili")
+                as_audio_input = ydl_audio_output
+                as_audio_vocals = gr.Audio(label="Vocals only")
+                as_audio_no_vocals = gr.Audio(label="Music only", type="filepath", visible=False)
+                as_audio_message = gr.Textbox(label="Message", visible=False)
+
+        ydl_url_submit.click(fn=youtube_downloader, inputs=[ydl_url_input, start, end], outputs=[ydl_audio_output])
+        as_audio_submit.click(fn=audio_separated, inputs=[as_audio_input], outputs=[as_audio_vocals, as_audio_no_vocals, as_audio_message], show_progress=True, queue=True)
+
     with gr.Row():
         with gr.Column():
-            with gr.Tab('Audio conversion'):
-                input_audio = gr.Audio(label='Input audio')
-
-                vc_convert_btn = gr.Button('Convert', variant='primary')
-
-            with gr.Tab('TTS conversion'):
-                tts_input = gr.TextArea(
-                    label='TTS input text'
+            with gr.Tab('🎶 - Voice conversion'):
+                input_audio = as_audio_vocals
+                vc_convert_btn = gr.Button('Convert the vocals!', variant='primary')
+                full_song = gr.Button("Add the accompaniment!", variant="primary")
+                new_song = gr.Audio(label="AI singer + accompaniment", type="filepath")
+
+            with gr.Tab('🎙️ - Text to speech'):
+                tts_input = gr.Textbox(
+                    label='Enter the text you want to convert (Chinese or English)',
+                    lines=3
                 )
                 tts_speaker = gr.Dropdown(
                     [
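Note that `as_audio_input = ydl_audio_output` is a plain Python alias, not a copy: the same `gr.Audio` component is the output of the download step and the input of the separation step. A stripped-down sketch of that wiring pattern, with the fixed 0-15 s window folded into a lambda:

```python
import gradio as gr

with gr.Blocks() as demo:
    url = gr.Textbox(label='Bilibili video URL')
    fetch = gr.Button('Extract the audio')
    audio = gr.Audio(label='Audio from Bilibili')  # output of fetch, input of split
    split = gr.Button('Remove the backing track')
    vocals = gr.Audio(label='Vocals only')
    backing = gr.Audio(label='Music only', visible=False)
    message = gr.Textbox(label='Message', visible=False)

    fetch.click(fn=lambda u: youtube_downloader(u, 0, 15), inputs=[url], outputs=[audio])
    split.click(fn=audio_separated, inputs=[audio], outputs=[vocals, backing, message])
```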
@@ -318,12 +649,24 @@ with app:
                         )
                         for s in tts_speakers_list
                     ],
-                    label='TTS speaker',
+                    label='Select a speaker for the matching language',
                     type='index'
                 )
 
-                tts_convert_btn = gr.Button('Convert', variant='primary')
-
+                tts_convert_btn = gr.Button('Run the AI voice changer', variant='primary')
+
+            with gr.Tab("📺 - Music video"):
+                with gr.Row():
+                    with gr.Column():
+                        inp1 = gr.Textbox(label="Add a caption to the video (optional; English)")
+                        inp2 = new_song
+                        inp3 = gr.Image(source='upload', type='filepath', label="Upload a background image")
+                        btn = gr.Button("Generate your own music video", variant="primary")
+
+                    with gr.Column():
+                        out1 = gr.Video(label='Your music video')
+                btn.click(fn=infer, inputs=[inp1, inp2, inp3], outputs=[out1])
+
             pitch_adjust = gr.Slider(
                 label='Pitch',
                 minimum=-24,
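The music-video tab renders one bar frame per STFT column, so the clip `infer` builds runs at roughly `sr / hop_length` frames per second before the final 25 fps re-encode. Back-of-the-envelope, with librosa's default 22,050 Hz resample:

```python
sr, hop_length = 22_050, 512  # librosa.load() default sr; hop set in infer()
duration = 15.0               # seconds of input audio
n_frames = int(duration * sr / hop_length) + 1  # one bar image per STFT column
print(n_frames, n_frames / duration)            # ~646 frames at ~43 fps
```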
@@ -338,7 +681,7 @@ with app:
                 interactive=True
             )
 
-            with gr.Accordion('Advanced options', open=False):
+            with gr.Accordion('More settings', open=False):
                 feat_ratio = gr.Slider(
                     label='Feature ratio',
                     minimum=0,
@@ -382,19 +725,19 @@ with app:
                 )
                 for m in loaded_models
             ],
-            label='Model',
+            label='Select your AI singer (required)',
             type='index'
         )
 
         # Model info
         with gr.Box():
             model_info = gr.Markdown(
-                '### Model info\n'
+                '### AI singer info\n'
                 'Please select a model from dropdown above.',
                 elem_id='model_info'
             )
 
-        output_audio = gr.Audio(label='Output audio')
+        output_audio = gr.Audio(label='AI singer (a cappella)', type="filepath")
         output_msg = gr.Textbox(label='Output message')
 
         multi_examples = multi_cfg.get('examples')
@@ -454,6 +797,8 @@ with app:
         api_name='tts_conversion'
     )
 
+    full_song.click(fn=mix, inputs=[output_audio, as_audio_no_vocals], outputs=[new_song])
+
     model_index.change(
         update_model_info,
        inputs=[model_index],
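`full_song.click` hands `mix()` two file paths (`output_audio` is now `type="filepath"`), and pydub's `overlay` never extends the base segment, so the result is trimmed to the converted vocals. A quick check with hypothetical local files:

```python
from pydub import AudioSegment

vocals = AudioSegment.from_file('vocals.wav')       # hypothetical paths
backing = AudioSegment.from_file('no_vocals.wav')

combined = vocals.overlay(backing)   # kept at the length of `vocals`
combined.export('song.wav', format='wav')
print(len(combined) == len(vocals))  # True; pydub lengths are in milliseconds
```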
@@ -461,6 +806,15 @@ with app:
         show_progress=False,
         queue=False
     )
+
+    gr.Markdown("### <center>Note❗: Do not generate content that could harm individuals or organizations; this program is for research, study, and personal entertainment only.</center>")
+    gr.Markdown("### <center>🧸 - How to use: fill in the video URL and the start/end times, then click the four buttons 'Extract the audio', 'Remove the backing track', 'Convert the vocals!', and 'Add the accompaniment!' in order.</center>")
+    gr.HTML('''
+    <div class="footer">
+        <p>🌊🏞️🎶 - The river hurries east, its surging voice without end. (Gu Lin, Ming dynasty)
+        </p>
+    </div>
+    ''')
 
 app.queue(
     concurrency_count=1,
 