juancopi81 committed
Commit ed28ae4
Parent(s): 69513e2

Change to Gradio Blocks, add radio for selecting the model

Files changed (1): app.py (+47 −15)
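
Note: the change below replaces the single gr.Interface call with the Gradio Blocks API and adds a gr.Radio whose change event reloads the checkpoint. As a minimal standalone sketch of that event pattern (assuming only gradio is installed; load_model is a hypothetical stand-in for the Space's InferenceModel):

import gradio as gr

state = {"model": "mt3"}  # mutable holder; the commit uses module-level globals instead

def load_model(name):
    # Hypothetical stand-in for InferenceModel("/home/user/app/checkpoints/mt3/", name)
    print(f"loading checkpoint for {name}")

def change_model(name):
    # Reload only when the selection actually changes
    if name != state["model"]:
        state["model"] = name
        load_model(name)

with gr.Blocks() as demo:
    radio = gr.Radio(["mt3", "ismir2021"], label="Model")
    radio.change(fn=change_model, inputs=radio, outputs=[])

demo.launch()

Either way, the loaded model is process-wide state, so concurrent visitors switching the radio will affect each other's session.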
app.py CHANGED
@@ -10,31 +10,63 @@ from inferencemodel import InferenceModel
 from utils import upload_audio
 
 SAMPLE_RATE = 16000
-SF2_PATH = 'SGM-v2.01-Sal-Guit-Bass-V1.3.sf2'
+SF2_PATH = "SGM-v2.01-Sal-Guit-Bass-V1.3.sf2"
 
 # Start inference model
-inference_model = InferenceModel('/home/user/app/checkpoints/mt3/', 'mt3')
+inference_model = InferenceModel("/home/user/app/checkpoints/mt3/", "mt3")
+current_model = "mt3"
+
+def change_model(model):
+    global current_model
+    if model == current_model:
+        return
+    global inference_model
+    inference_model = InferenceModel("/home/user/app/checkpoints/mt3/", model)
+    current_model = model
 
 def inference(audio):
-    with open(audio, 'rb') as fd:
-        contents = fd.read()
+    with open(audio, "rb") as fd:
+        contents = fd.read()
 
-    audio = upload_audio(contents,sample_rate=16000)
+    audio = upload_audio(contents, sample_rate=SAMPLE_RATE)
 
-    est_ns = inference_model(audio)
-
-    note_seq.sequence_proto_to_midi_file(est_ns, './transcribed.mid')
-
-    return './transcribed.mid'
+    est_ns = inference_model(audio)
+
+    note_seq.sequence_proto_to_midi_file(est_ns, "./transcribed.mid")
+
+    return "./transcribed.mid"
 
-title = "MT3"
-description = "Gradio demo for MT3: Multi-Task Multitrack Music Transcription. To use it, simply upload your audio file, or click one of the examples to load them. Read more at the links below."
+title = "Transcribe music from YouTube videos using Transformers."
+description = """
+Gradio demo for Music Transcription with Transformers. Read more at the links below.
+"""
 
 article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2111.03017' target='_blank'>MT3: Multi-Task Multitrack Music Transcription</a> | <a href='https://github.com/magenta/mt3' target='_blank'>Github Repo</a></p>"
 
-examples=[['download.wav']]
+# Create a Blocks object
+demo = gr.Blocks()
 
-gr.Interface(
+# Use the Blocks object as a context manager
+with demo:
+    gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>"
+                + title
+                + "</h1>")
+    gr.Markdown(description)
+    with gr.Box():
+        with gr.Row():
+            gr.Markdown("<h2>Select your model</h2>")
+            gr.Markdown("""
+            The ismir2021 model transcribes piano only, with note velocities.
+            The mt3 model transcribes multiple simultaneous instruments, but without velocities.
+            """)
+    model = gr.Radio(
+        ["mt3", "ismir2021"], label="What kind of model do you want to use?"
+    )
+    model.change(fn=change_model, inputs=model, outputs=[])
+
+demo.launch()
+
+""" gr.Interface(
     inference,
     gr.inputs.Audio(type="filepath", label="Input"),
     [gr.outputs.File(label="Output")],
@@ -42,4 +74,4 @@ gr.Interface(
     description=description,
     article=article,
     examples=examples,
-).launch().queue()
+).launch().queue() """
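
Note that after this commit the UI exposes only the model selector: with the gr.Interface call commented out, inference() is no longer reachable from the page. A sketch of one way to wire it back in inside the "with demo:" context (component names are illustrative; plain gr.Audio/gr.File components replace the gr.inputs/gr.outputs wrappers inside Blocks):

# Illustrative addition inside "with demo:", after the model radio
audio_input = gr.Audio(type="filepath", label="Input")
transcribe_btn = gr.Button("Transcribe")
midi_output = gr.File(label="Transcribed MIDI")
# Reuse the existing inference(audio) -> MIDI-file path
transcribe_btn.click(fn=inference, inputs=audio_input, outputs=midi_output)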