hugofloresgarcia committed
Commit 73b8dce
Parent: 23b6dff
Files changed (2)
  1. app.py +9 -13
  2. requirements.txt +1 -1
app.py CHANGED
@@ -375,7 +375,7 @@ def ui_harp(launch_kwargs):
     with gr.Row():
         melody = gr.Audio(label="Melody Audio", type="filepath")
         text = gr.Text(label="Input Text", interactive=True)
-    with gr.Row():
+    # with gr.Row():
         # model = gr.Radio(["melody", "medium", "small", "large"],
         #                  label="Model", value="small", interactive=True)
 
@@ -392,35 +392,31 @@ def ui_harp(launch_kwargs):
 
     def predict_harp(_melody, _text, _duration, _temperature, _cfg_coef):
         from audiotools import AudioSignal
-        import torcb
-        sig = AudioSignal(_melody)
+        import torch
+        # sig = AudioSignal(_melody)
 
-        samples = sig.samples[0].numpy()
+        # samples = (sig.sample_rate, sig.samples[0].numpy())
         output = predict_full(
             model="small",
+            melody=None,
             text=_text, duration=_duration,
             topk=250, topp=0,
             temperature=_temperature,
             cfg_coef=_cfg_coef
         )
-        sig = AudioSignal(torch.from_numpy(output).unsqueeze(0))
-        sig.write('output.wav')
-        return sig.path_to_file
+        return output
 
-
-    submit.click(predict_harp,
-                 inputs=inputs,
-                 outputs=[output])
-
 
     from pyharp import ModelCard, build_endpoint
     card = ModelCard(
         name="MusicGen (Meta)",
-        description="The model will generate a short music extract based on the description you provided. The model can generate up to 30 seconds of audio in one pass.",
+        description="The model will generate a short excerpt based on the text description you provided. The model can generate up to 30 seconds of audio in one pass.",
         author="Jade Copet et al.",
         tags=["example", "music generation"]
     )
 
+    build_endpoint(inputs, output, predict_harp, card=card)
+
     interface.queue().launch(**launch_kwargs)
 
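For context, a minimal sketch of how ui_harp fits together after this commit. It assumes only the build_endpoint(inputs, output, predict_harp, card=card) call shown above; the slider names and ranges are hypothetical stand-ins for controls defined elsewhere in app.py, and predict_full is the existing MusicGen inference helper in this file:

import gradio as gr
from pyharp import ModelCard, build_endpoint

def ui_harp(launch_kwargs):
    with gr.Blocks() as interface:
        with gr.Row():
            melody = gr.Audio(label="Melody Audio", type="filepath")
            text = gr.Text(label="Input Text", interactive=True)

        # Hypothetical controls standing in for the duration/temperature/cfg
        # widgets defined elsewhere in app.py (names and ranges assumed).
        duration = gr.Slider(minimum=1, maximum=30, value=10, label="Duration (s)")
        temperature = gr.Slider(minimum=0.0, maximum=2.0, value=1.0, label="Temperature")
        cfg_coef = gr.Slider(minimum=0.0, maximum=10.0, value=3.0, label="CFG Coefficient")
        output = gr.Audio(label="Generated Audio", type="filepath")
        inputs = [melody, text, duration, temperature, cfg_coef]

        def predict_harp(_melody, _text, _duration, _temperature, _cfg_coef):
            # Melody conditioning is bypassed for now (melody=None), per the
            # commit; the result of predict_full is forwarded unchanged.
            return predict_full(
                model="small",
                melody=None,
                text=_text, duration=_duration,
                topk=250, topp=0,
                temperature=_temperature,
                cfg_coef=_cfg_coef,
            )

        card = ModelCard(
            name="MusicGen (Meta)",
            description="Generates a short music excerpt from a text description.",
            author="Jade Copet et al.",
            tags=["example", "music generation"],
        )
        # Register predict_harp as a HARP endpoint over the Gradio components.
        build_endpoint(inputs, output, predict_harp, card=card)

    interface.queue().launch(**launch_kwargs)

Returning predict_full's result directly, instead of re-wrapping it in an AudioSignal and writing output.wav, presumably works because predict_full already produces an artifact the Gradio output component can serve.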
 
requirements.txt CHANGED
@@ -19,4 +19,4 @@ demucs
 librosa
 gradio_client==0.2.6
 descript-audiotools
--e git+https://github.com/audacitorch/pyharp.git#egg=pyharp
+-e git+https://github.com/audacitorch/pyharp.git#egg=pyharp
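
Note: the -e git+https://github.com/audacitorch/pyharp.git#egg=pyharp line is an editable VCS requirement, so pip clones pyharp from GitHub and installs the checkout in development mode. The removed and re-added lines render identically above, so the underlying change is most likely whitespace- or newline-only.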