juancopi81 committed on
Commit 058f5c3
1 Parent(s): 2ab00ef

Add logs to debug

Files changed (1)
  1. app.py +17 -7
app.py CHANGED
@@ -33,16 +33,15 @@ def get_audio(url):
     video = yt.streams.filter(only_audio=True).first()
     out_file = video.download(output_path=".")
     base, ext = os.path.splitext(out_file)
-    print("the extension is", ext)
     new_file = base + ".wav"
     os.rename(out_file, new_file)
     a = new_file
-
+    print("file a is:", a)
     wav_to_cut = AudioSegment.from_wav(a)
     # pydub does things in milliseconds
     ten_seconds = 10 * 1000
     first_10_seconds = wav_to_cut[:ten_seconds]
-
+    os.remove(new_file)
     return first_10_seconds
 
 # Credits https://huggingface.co/spaces/jeffistyping/Youtube-Whisperer
@@ -67,7 +66,6 @@ title = "Transcribe music from YouTube videos using Transformers."
 description = """
 Gradio demo for Music Transcription with Transformers. Read more in the links below.
 """
-
 article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2111.03017' target='_blank'>MT3: Multi-Task Multitrack Music Transcription</a> | <a href='https://github.com/magenta/mt3' target='_blank'>Github Repo</a></p>"
 
 # Create a block object
@@ -80,10 +78,9 @@ with demo:
                 + "</h1>")
     gr.Markdown(description)
     with gr.Box():
-        gr.Markdown("<h2>Select your model</h2>")
         gr.Markdown("""
-        The ismir2021 model transcribes piano only, with note velocities.
-        The mt3 model transcribes multiple simultaneous instruments, but without velocities."
+        Select your model: The ismir2021 model transcribes piano only, with note velocities.
+        The mt3 model transcribes multiple simultaneous instruments, but without velocities.
         """)
         model = gr.Radio(
             ["mt3", "ismir2021"], label="What kind of model you want to use?", value="mt3"
@@ -96,7 +93,20 @@ with demo:
         img = gr.Image(label="Thumbnail")
         with gr.Row():
             yt_audio = gr.Audio()
+
        link.change(fn=populate_metadata, inputs=link, outputs=[img, title, yt_audio])
+
+        with gr.Row():
+            btn = gr.Button("Transcribe music")
+
+        audio_file = gr.File()
+
+        btn.click(inference,
+                  inputs = [
+                      yt_audio
+                  ],
+                  outputs=audio_file)
+
 
 demo.launch()
 
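
For context, below is a minimal sketch of get_audio as it reads after this commit, assembled from the first hunk above. The imports and the yt = YouTube(url) line are assumptions about the unchanged parts of app.py (they sit outside the diff); the body lines are taken from the hunk. The two added lines are the debug print named in the commit message and an os.remove that deletes the temporary .wav once the first ten seconds have been sliced out.

# Sketch of get_audio after commit 058f5c3. Assumes pytube and pydub are
# available, as elsewhere in the Space's app.py.
import os

from pytube import YouTube
from pydub import AudioSegment

def get_audio(url):
    yt = YouTube(url)                                   # assumed: defined above the changed hunk
    video = yt.streams.filter(only_audio=True).first()  # grab an audio-only stream
    out_file = video.download(output_path=".")
    base, ext = os.path.splitext(out_file)
    new_file = base + ".wav"
    os.rename(out_file, new_file)
    a = new_file
    print("file a is:", a)                              # debug log added in this commit
    wav_to_cut = AudioSegment.from_wav(a)
    # pydub does things in milliseconds
    ten_seconds = 10 * 1000
    first_10_seconds = wav_to_cut[:ten_seconds]
    os.remove(new_file)                                  # clean up the temporary wav, added in this commit
    return first_10_seconds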