File size: 2,711 Bytes
27b1390
3f9fabf
b1d66c1
f3c0738
27b1390
afa4c92
27b1390
3f9fabf
6e76183
3f9fabf
 
 
 
 
 
 
 
 
6e76183
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4bb84bd
 
3f9fabf
 
4bb84bd
 
f3c0738
 
 
afa4c92
b1d66c1
afa4c92
76d8f16
4bb84bd
b1d66c1
 
6e76183
76d8f16
 
 
 
6e76183
 
76d8f16
 
 
 
 
4bb84bd
b1d66c1
f3c0738
 
 
 
 
b1d66c1
 
6e76183
3f9fabf
 
afa4c92
 
 
 
 
 
 
4bb84bd
afa4c92
6e76183
 
 
 
 
 
 
 
468ea08
f3c0738
468ea08
 
 
 
 
 
 
 
 
 
 
 
4bb84bd
 
afa4c92
f3c0738
756b159
afa4c92
b1d66c1
3f9fabf
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
import gradio as gr
import os
import allin1
import time

from pathlib import Path

# HTML header rendered at the top of the Gradio app: title, project links,
# and a short description of what the analyzer outputs.
# Fix: "demonstrates the music structure analyzer predicts" was missing "what".
HEADER = """
<header style="text-align: center;">
  <h1>
    All-In-One Music Structure Analyzer 🔮
  </h1>
  <p>
    <a href="https://github.com/mir-aidj/all-in-one">[Python Package]</a>
    <a href="https://arxiv.org/abs/2307.16425">[Paper]</a>
    <a href="https://taejun.kim/music-dissector/">[Visual Demo]</a>
  </p>
</header>
<main
  style="display: flex; justify-content: center;"
>
  <div
    style="display: inline-block;"
  >
    <p>
      This Space demonstrates what the music structure analyzer predicts:
      <ul
        style="padding-left: 1rem;"
      >
        <li>BPM</li>
        <li>Beats</li>
        <li>Downbeats</li>
        <li>Functional segment boundaries</li>
        <li>Functional segment labels (e.g. intro, verse, chorus, bridge, outro)</li>
      </ul>
    </p>
    <p>
      For more information, please visit the links above ✨🧸
    </p>
  </div>
</main>
"""

# Cache Gradio example outputs unless CACHE_EXAMPLES is explicitly set to
# something other than '1' in the environment.
CACHE_EXAMPLES = os.getenv('CACHE_EXAMPLES', '1') == '1'


def analyze(path):
  """Run the all-in-one music structure analysis on an audio file.

  Args:
    path: Filepath of the uploaded audio (str or Path).

  Returns:
    Tuple of (BPM, matplotlib figure visualizing the structure,
    POSIX path to the sonified audio file, elapsed seconds as float).
  """
  # perf_counter is monotonic, so the measured duration cannot be skewed
  # by system clock adjustments mid-inference (time.time() can be).
  start = time.perf_counter()

  path = Path(path)
  result = allin1.analyze(
    path,
    out_dir='./struct',
    multiprocess=False,
    keep_byproducts=True,  # TODO: remove this
  )

  fig = allin1.visualize(
    result,
    multiprocess=False,
  )
  fig.set_dpi(300)  # higher DPI so the plot stays readable in the browser

  # Sonification writes '<stem>.sonif<ext>' next to the analysis outputs.
  allin1.sonify(
    result,
    out_dir='./sonif',
    multiprocess=False,
  )
  sonif_path = (Path('./sonif') / f'{path.stem}.sonif{path.suffix}').resolve().as_posix()

  elapsed_time = time.perf_counter() - start

  return result.bpm, fig, sonif_path, elapsed_time


# --- Gradio UI wiring -------------------------------------------------------
with gr.Blocks() as demo:
  gr.HTML(HEADER)

  # Input: an uploaded audio file, handed to `analyze` as a filesystem path.
  input_audio_path = gr.Audio(
    label='Input',
    source='upload',
    type='filepath',
    format='mp3',
    show_download_button=False,
  )
  button = gr.Button('Analyze', variant='primary')

  # Outputs: structure plot on top, then BPM / sonified audio / timing in a row.
  output_viz = gr.Plot(label='Visualization')
  with gr.Row():
    output_bpm = gr.Textbox(label='BPM', scale=1)
    output_sonif = gr.Audio(
      label='Sonification',
      type='filepath',
      format='mp3',
      show_download_button=False,
      scale=9,
    )
    elapsed_time = gr.Textbox(label='Overall inference time', scale=1)

  # NOTE(review): a commented-out gr.Examples block (using CACHE_EXAMPLES)
  # was removed as dead code — restore from version control if the example
  # assets are re-added to the Space.

  button.click(
    fn=analyze,
    inputs=input_audio_path,
    outputs=[output_bpm, output_viz, output_sonif, elapsed_time],
    api_name='analyze',
  )

if __name__ == '__main__':
  demo.launch()