asigalov61 committed on
Commit c58641e
1 Parent(s): 11f0dcb

Update app.py

Files changed (1)
  1. app.py +7 -5
app.py CHANGED
@@ -20,9 +20,9 @@ in_space = os.getenv("SYSTEM") == "spaces"
 #=================================================================================================
 
 @torch.no_grad()
-def GenerateMIDI(idrums, iinstr, progress=gr.Progress()):
+def GenerateMIDI(num_tok, idrums, iinstr, progress=gr.Progress()):
 
-    number_of_notes_to_generate = 140
+    print('Req num tok', num_tok)
 
     if idrums:
         drums = 3074
@@ -40,12 +40,12 @@ def GenerateMIDI(idrums, iinstr, progress=gr.Progress()):
 
     outy = start_tokens
 
-    for i in progress.tqdm(range(number_of_notes_to_generate)):
+    for i in progress.tqdm(range(num_tok)):
 
        inp = torch.LongTensor([outy]).cpu()
 
        out = model.module.generate(inp,
-                                   3,
+                                   1,
                                    temperature=0.9,
                                    return_prime=False,
                                    verbose=True)
@@ -205,14 +205,16 @@ if __name__ == "__main__":
            "(https://colab.research.google.com/github/asigalov61/Allegro-Music-Transformer/blob/main/Allegro_Music_Transformer_Composer.ipynb)"
            " for faster execution and endless generation"
        )
+
        input_drums = gr.Checkbox(label="Drums Controls", value = False, info="Drums present or not")
        input_instrument = gr.Radio(["Piano", "Guitar", "Bass", "Violin", "Cello", "Harp", "Trumpet", "Sax", "Flute", "Choir", "Organ"], value="Piano", label="Lead Instrument Controls", info="Desired lead instrument")
+       input_num_tokens = gr.Slider(16, 512, value=256, label="Number of Tokens", info="Number of tokens to generate")
        run_btn = gr.Button("generate", variant="primary")
 
        output_midi_seq = gr.Variable()
        output_audio = gr.Audio(label="output audio", format="mp3", elem_id="midi_audio")
        output_plot = gr.Plot(label="output plot")
        output_midi = gr.File(label="output midi", file_types=[".mid"])
-       run_event = run_btn.click(GenerateMIDI, [input_drums, input_instrument], [output_midi_seq, output_plot, output_midi, output_audio])
+       run_event = run_btn.click(GenerateMIDI, [input_num_tokens, input_drums, input_instrument], [output_midi_seq, output_plot, output_midi, output_audio])
 
        app.queue(concurrency_count=1).launch(server_port=opt.port, share=opt.share, inbrowser=True)
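
For context, below is a minimal, self-contained sketch of the pattern this commit moves to, not the Space's actual code: the slider value arrives as the first positional argument (num_tok), and each loop pass requests a single token, so the progress bar advances once per generated token. The model call is replaced by a hypothetical next_token() stub, and a plain Textbox stands in for the real audio/plot/MIDI outputs.

import random

import gradio as gr

def next_token(seq):
    # Stand-in for model.module.generate(inp, 1, temperature=0.9, ...):
    # returns one fake token per call so the sketch runs without the model.
    return random.randint(0, 3087)

def GenerateMIDI(num_tok, idrums, iinstr, progress=gr.Progress()):
    print('Req num tok', num_tok)
    outy = []  # the real app seeds this with start_tokens built from idrums/iinstr
    for _ in progress.tqdm(range(int(num_tok))):  # one token per iteration
        outy.append(next_token(outy))
    return f"{iinstr}: generated {len(outy)} tokens (drums={idrums})"

app = gr.Blocks()
with app:
    input_drums = gr.Checkbox(label="Drums Controls", value=False, info="Drums present or not")
    input_instrument = gr.Radio(["Piano", "Guitar"], value="Piano", label="Lead Instrument Controls")
    input_num_tokens = gr.Slider(16, 512, value=256, label="Number of Tokens",
                                 info="Number of tokens to generate")
    run_btn = gr.Button("generate", variant="primary")
    output_text = gr.Textbox(label="status")
    # The Slider value is passed positionally first, matching the new
    # [input_num_tokens, input_drums, input_instrument] input list in the commit.
    run_btn.click(GenerateMIDI, [input_num_tokens, input_drums, input_instrument], [output_text])

app.queue(concurrency_count=1).launch()  # queue() mirrors the app's Gradio 3.x-style call

Compared with the previous fixed number_of_notes_to_generate = 140 and 3 tokens per generate() call, generating num_tok single tokens presumably ties the output length directly to the new slider and keeps the progress bar in step with it.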