Ahsen Khaliq committed
Commit
c6d1a7a
1 Parent(s): f4e8264

Update app.py

Files changed (1):
  app.py (+19, -13)
app.py CHANGED
@@ -52,12 +52,9 @@ nest_asyncio.apply()
 SAMPLE_RATE = 16000
 SF2_PATH = 'SGM-v2.01-Sal-Guit-Bass-V1.3.sf2'
 
-def upload_audio(sample_rate):
-  data = list(files.upload().values())
-  if len(data) > 1:
-    print('Multiple files uploaded; using only one.')
+def upload_audio(audio, sample_rate):
   return note_seq.audio_io.wav_data_to_samples_librosa(
-    data[0], sample_rate=sample_rate)
+    audio, sample_rate=sample_rate)
 
 
 
@@ -259,24 +256,33 @@ class InferenceModel(object):
 
 
 
+
+
+inference_model = InferenceModel('./checkpoints/mt3/', 'mt3')
+
+
 def inference(audio):
-  os.system("midi_ddsp_synthesize --midi_path "+audio.name)
-  return Path(audio.name).stem+"/0_violin.wav"
+  audio = upload_audio(audio, sample_rate=16000)
+
+  est_ns = inference_model(audio)
+
+  note_seq.sequence_proto_to_midi_file(est_ns, './transcribed.mid')
+
+  return './transcribed.mid'
 
 title = "Midi-DDSP"
 description = "Gradio demo for MIDI-DDSP: Detailed Control of Musical Performance via Hierarchical Modeling. To use it, simply upload your midi file, or click one of the examples to load them. Read more at the links below."
 
 article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2112.09312' target='_blank'>MIDI-DDSP: Detailed Control of Musical Performance via Hierarchical Modeling</a> | <a href='https://github.com/magenta/midi-ddsp' target='_blank'>Github Repo</a></p>"
 
-examples=[['input.mid']]
+examples=[['download.wav']]
 
 gr.Interface(
     inference,
-    gr.inputs.File(type="file", label="Input"),
-    [gr.outputs.Audio(type="file", label="Output")],
+    gr.inputs.Audio(type="filepath", label="Input"),
+    [gr.outputs.File(type="file", label="Output")],
     title=title,
     description=description,
     article=article,
-    examples=examples,
-    enable_queue=True
-    ).launch(debug=True)
+    examples=examples
+    ).launch(enable_queue=True)
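
For context, the two hunks above replace the MIDI-DDSP synthesis step (shelling out to midi_ddsp_synthesize on an uploaded MIDI file) with MT3 audio-to-MIDI transcription: the Gradio input becomes an Audio component and the output a MIDI file, while the title and description strings are left unchanged from the earlier MIDI-DDSP demo in this commit. The sketch below shows how the changed pieces fit together after the commit; it is a minimal sketch, not the full file, and it assumes the InferenceModel class defined earlier in app.py, the ./checkpoints/mt3/ checkpoint from the diff, and the Gradio 2.x gr.inputs/gr.outputs API used here. The explicit byte read before decoding is also an assumption, since note_seq.audio_io.wav_data_to_samples_librosa is normally given in-memory WAV data rather than a filepath.

    # Sketch only: assumes InferenceModel is defined earlier in app.py and that
    # ./checkpoints/mt3/ holds the MT3 checkpoint referenced in the diff.
    import gradio as gr
    import note_seq

    SAMPLE_RATE = 16000

    def upload_audio(audio, sample_rate):
        # Decode in-memory WAV data into a float sample array for the model.
        return note_seq.audio_io.wav_data_to_samples_librosa(
            audio, sample_rate=sample_rate)

    inference_model = InferenceModel('./checkpoints/mt3/', 'mt3')  # class defined above in app.py

    def inference(audio):
        # gr.inputs.Audio(type="filepath") passes a path; read the raw bytes first
        # (assumption: wav_data_to_samples_librosa expects WAV bytes, not a path).
        with open(audio, 'rb') as f:
            wav_data = f.read()
        samples = upload_audio(wav_data, sample_rate=SAMPLE_RATE)
        est_ns = inference_model(samples)  # transcribe audio to a NoteSequence
        note_seq.sequence_proto_to_midi_file(est_ns, './transcribed.mid')
        return './transcribed.mid'

    gr.Interface(
        inference,
        gr.inputs.Audio(type="filepath", label="Input"),
        [gr.outputs.File(type="file", label="Output")],
        title="Midi-DDSP",
    ).launch(enable_queue=True)

With this wiring, a user uploads or records audio, the MT3 model transcribes it, and the interface returns ./transcribed.mid for download, which is why the example entry changes from input.mid to download.wav.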