hf-lin committed
Commit
9644bee
1 Parent(s): f5261ab

render abc notation

Files changed (3)
  1. app.py +16 -21
  2. packages.txt +2 -2
  3. requirements.txt +0 -3
app.py CHANGED
@@ -7,15 +7,11 @@ import subprocess
 from uuid import uuid4
 import gradio as gr
 import torch
-import torchaudio
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from transformers.generation import GenerationConfig
-from symusic import Score, Synthesizer
 import spaces
 
 os.environ['QT_QPA_PLATFORM']='offscreen'
-subprocess.run("modprobe fuse", shell=True, capture_output=True, text=True)
-subprocess.run("chmod +x MuseScore-4.1.1.232071203-x86_64.AppImage", shell=True)
 
 torch.backends.cuda.enable_mem_efficient_sdp(False)
 torch.backends.cuda.enable_flash_sdp(False)
@@ -94,27 +90,26 @@ def convert_history_to_text(task_history):
 # todo
 def postprocess_abc(text, conversation_id):
     os.makedirs(f"tmp/{conversation_id}", exist_ok=True)
-    ts = time.time()
-
     abc_pattern = r'(X:\d+\n(?:[^\n]*\n)+)'
     abc_notation = re.findall(abc_pattern, text+'\n')
     print(f'extract abc block: {abc_notation}')
     if abc_notation:
-        # render ABC as audio
-        s = Score.from_abc(abc_notation[0])
-        audio = Synthesizer().render(s, stereo=True)
-        audio_file = f'tmp/{conversation_id}/{ts}.mp3'
-        torchaudio.save(audio_file, torch.FloatTensor(audio), 44100)
-
-        # Convert abc notation to SVG
+        ts = time.time()
+        # Write the ABC text to a temporary file
+        tmp_abc = f"tmp/{conversation_id}/{ts}.abc"
+        with open(tmp_abc, "w") as abc_file:
+            abc_file.write(abc_notation[0])
+        # Convert abc notation to midi
         tmp_midi = f'tmp/{conversation_id}/{ts}.mid'
-        s.dump_midi(tmp_midi)
+        subprocess.run(["abc2midi", str(tmp_abc), "-o", tmp_midi])
+        # Convert abc notation to SVG
         svg_file = f'tmp/{conversation_id}/{ts}.svg'
-        subprocess.run(["./MuseScore-4.1.1.232071203-x86_64.AppImage", "-f", "-o", svg_file, tmp_midi])
-
-        return svg_file, audio_file
+        audio_file = f'tmp/{conversation_id}/{ts}.mp3'
+        subprocess.run(["musescore", "-o", svg_file, tmp_midi], capture_output=True, text=True)
+        subprocess.run(["musescore", "-o", audio_file, tmp_midi])
+        return svg_file.replace(".svg", "-1.svg"), audio_file
     else:
-        return None, None
+        return None, None
 
 
 def _launch_demo(model, tokenizer):
@@ -193,10 +188,10 @@ def _launch_demo(model, tokenizer):
 &nbsp<a href="https://huggingface.co/datasets/m-a-p/MusicPile">🤗 Pretrain Dataset</a>&nbsp |
 &nbsp<a href="https://huggingface.co/datasets/m-a-p/MusicPile-sft">🤗 SFT Dataset</a></center>""")
         gr.Markdown("""\
-<center><font size=4>💡Note: The music clips on this page is auto-converted from abc notations which may not be perfect,
+<center><font size=4>💡Note: The music clips on this page is auto-converted using musescore2 which may not be perfect,
 and we recommend using better software for analysis.</center>""")
 
-        chatbot = gr.Chatbot(label='Chat-Musician', elem_classes="control-height", height=750)
+        chatbot = gr.Chatbot(label='ChatMusician', elem_classes="control-height", height=750)
         query = gr.Textbox(lines=2, label='Input')
         task_history = gr.State([])
 
@@ -207,8 +202,8 @@ def _launch_demo(model, tokenizer):
         gr.Examples(
             examples=[
                 ["Create music by following the alphabetic representation of the assigned musical structure and the given motif.\n'ABCA';X:1\nL:1/16\nM:2/4\nK:A\n['E2GB d2c2 B2A2', 'D2 C2E2 A2c2']"],
+                ["Develop a melody using the given chord pattern.\n'C', 'C', 'G/D', 'D', 'G', 'C', 'G', 'G', 'C', 'C', 'F', 'C/G', 'G7', 'C'"],
                 ["Create sheet music in ABC notation from the provided text.\nAlternative title: \nThe Legacy\nKey: G\nMeter: 6/8\nNote Length: 1/8\nRhythm: Jig\nOrigin: English\nTranscription: John Chambers"],
-                ["Develop a melody using the given chord pattern.\n'C', 'C', 'G/D', 'D', 'G', 'C', 'G', 'G', 'C', 'C', 'F', 'C/G', 'G7', 'C'"]
             ],
            inputs=query
         )
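Note: the new rendering path can also be exercised outside Gradio with a few lines. The sketch below is illustrative only, assuming the abc2midi and musescore binaries installed via packages.txt are on PATH and that a single ABC tune is passed in; the render_abc helper, the output paths, and the sample tune are made up for the example.

import os
import subprocess
from pathlib import Path

# Mirror the app: let MuseScore render without a display.
os.environ['QT_QPA_PLATFORM'] = 'offscreen'

def render_abc(abc_text, out_dir="tmp/demo"):
    """Render one ABC tune to sheet music (SVG) and audio (MP3) via abc2midi + MuseScore."""
    out = Path(out_dir)
    out.mkdir(parents=True, exist_ok=True)

    abc_path = out / "tune.abc"
    abc_path.write_text(abc_text)

    # ABC -> MIDI with abc2midi (from the abcmidi package)
    midi_path = out / "tune.mid"
    subprocess.run(["abc2midi", str(abc_path), "-o", str(midi_path)])

    # MIDI -> SVG and MP3 with the MuseScore CLI; MuseScore numbers SVG pages,
    # which is why the app returns the "-1.svg" name.
    svg_path = out / "tune.svg"
    mp3_path = out / "tune.mp3"
    subprocess.run(["musescore", "-o", str(svg_path), str(midi_path)], capture_output=True, text=True)
    subprocess.run(["musescore", "-o", str(mp3_path), str(midi_path)], capture_output=True, text=True)
    return str(svg_path).replace(".svg", "-1.svg"), str(mp3_path)

if __name__ == "__main__":
    tune = "X:1\nL:1/8\nM:6/8\nK:G\nGAB d2B|c2A B2G|\n"
    print(render_abc(tune))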
packages.txt CHANGED
@@ -1,2 +1,2 @@
-fuse
-libfuse2
+abcmidi
+musescore
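This swap drops the FUSE libraries that the MuseScore AppImage needed and instead installs the two system packages the new code shells out to: abcmidi (which ships abc2midi) and musescore (MuseScore 2 on the Space's base image, per the note in app.py). A minimal, purely illustrative sanity check before launching the app could be:

import shutil

# abcmidi provides abc2midi; musescore provides the musescore command used in app.py.
for tool in ("abc2midi", "musescore"):
    found = shutil.which(tool)
    print(f"{tool}: {found or 'not found on PATH'}")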
requirements.txt CHANGED
@@ -1,6 +1,3 @@
-gradio==4.19.2
-symusic==0.4.2
 torch==2.2.1
-torchaudio==2.2.1
 transformers==4.32.0
 accelerate==0.25.0