juancopi81 commited on
Commit
cf24f3c
1 Parent(s): ed28ae4

Get metadata from youtube link

Browse files
Files changed (2) hide show
  1. app.py +24 -12
  2. requirements.txt +2 -1
app.py CHANGED
@@ -5,6 +5,7 @@ os.system("python3 -m pip install -e .")
5
  import gradio as gr
6
 
7
  import note_seq
 
8
 
9
  from inferencemodel import InferenceModel
10
  from utils import upload_audio
@@ -24,6 +25,11 @@ def change_model(model):
24
  inference_model = InferenceModel("/home/user/app/checkpoints/mt3/", model)
25
  current_model = model
26
 
 
 
 
 
 
27
  def inference(audio):
28
  with open(audio, "rb") as fd:
29
  contents = fd.read()
@@ -37,8 +43,8 @@ def inference(audio):
37
  return "./transcribed.mid"
38
 
39
  title = "Transcribe music from YouTube videos using Transformers."
40
- description = """"
41
- Gradio demo for Music Transcription with Transformers Read more in the links below.
42
  """
43
 
44
  article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2111.03017' target='_blank'>MT3: Multi-Task Multitrack Music Transcription</a> | <a href='https://github.com/magenta/mt3' target='_blank'>Github Repo</a></p>"
@@ -53,16 +59,22 @@ with demo:
53
  + "</h1>")
54
  gr.Markdown(description)
55
  with gr.Box():
56
- with gr.Row():
57
- gr.Markdown("<h2>Select your model</h2>")
58
- gr.Markdown("""
59
- The ismir2021 model transcribes piano only, with note velocities.
60
- The mt3 model transcribes multiple simultaneous instruments, but without velocities."
61
- """)
62
- model = gr.Radio(
63
- ["mt3", "ismir2021"], label="What kind of model you want to use?"
64
- )
65
- model.change(fn=change_model, inputs=model, outputs=[])
 
 
 
 
 
 
66
 
67
  demo.launch()
68
 
 
5
  import gradio as gr
6
 
7
  import note_seq
8
+ from pytube import YouTube
9
 
10
  from inferencemodel import InferenceModel
11
  from utils import upload_audio
 
25
  inference_model = InferenceModel("/home/user/app/checkpoints/mt3/", model)
26
  current_model = model
27
 
28
# Credits https://huggingface.co/spaces/jeffistyping/Youtube-Whisperer
def populate_metadata(link):
    """Return the (thumbnail_url, title) pair for a YouTube video link.

    Args:
        link: URL of a YouTube video, as typed into the Gradio textbox.

    Returns:
        A 2-tuple ``(thumbnail_url, title)`` read from the pytube
        ``YouTube`` object for that link.
    """
    video = YouTube(link)
    return video.thumbnail_url, video.title
32
+
33
  def inference(audio):
34
  with open(audio, "rb") as fd:
35
  contents = fd.read()
 
43
  return "./transcribed.mid"
44
 
45
  title = "Transcribe music from YouTube videos using Transformers."
46
+ description = """
47
+ Gradio demo for Music Transcription with Transformers. Read more in the links below.
48
  """
49
 
50
  article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2111.03017' target='_blank'>MT3: Multi-Task Multitrack Music Transcription</a> | <a href='https://github.com/magenta/mt3' target='_blank'>Github Repo</a></p>"
 
59
  + "</h1>")
60
  gr.Markdown(description)
61
  with gr.Box():
62
+ gr.Markdown("<h2>Select your model</h2>")
63
+ gr.Markdown("""
64
+ The ismir2021 model transcribes piano only, with note velocities.
65
+ The mt3 model transcribes multiple simultaneous instruments, but without velocities.
66
+ """)
67
+ model = gr.Radio(
68
+ ["mt3", "ismir2021"], label="What kind of model you want to use?"
69
+ )
70
+ model.change(fn=change_model, inputs=model, outputs=[])
71
+
72
+ link = gr.Textbox(label="YouTube Link")
73
+ with gr.Row().style(mobile_collapse=False, equal_height=True):
74
+ title = gr.Label(label="Video Title", placeholder="Title")
75
+ img = gr.Image(label="Thumbnail")
76
+
77
+ link.change(fn=populate_metadata, inputs=link, outputs=[img, title])
78
 
79
  demo.launch()
80
 
requirements.txt CHANGED
@@ -7,4 +7,5 @@ jax[cpu]==0.3.15 -f https://storage.googleapis.com/jax-releases/jax_releases.htm
7
  # pin CLU for python 3.7 compatibility
8
  clu==0.0.7
9
  # pin Orbax to use Checkpointer
10
- orbax==0.0.2
 
 
7
  # pin CLU for python 3.7 compatibility
8
  clu==0.0.7
9
  # pin Orbax to use Checkpointer
10
+ orbax==0.0.2
11
+ pytube