asigalov61 committed on
Commit
de73f36
1 Parent(s): a914076

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -24
app.py CHANGED
@@ -110,7 +110,18 @@ def GenerateMIDI(num_tok, idrums, iinstr):
110
  print('Done!')
111
  print('=' * 70)
112
 
113
- outy = start_tokens
 
 
 
 
 
 
 
 
 
 
 
114
 
115
  ctime = 0
116
  dur = 0
@@ -118,30 +129,15 @@ def GenerateMIDI(num_tok, idrums, iinstr):
118
  pitch = 0
119
  channel = 0
120
 
121
- for i in range(max(1, min(1024, num_tok))):
122
-
123
- inp = torch.LongTensor([outy]).cuda()
124
-
125
- with torch.amp.autocast(device_type='cuda', dtype=torch.bfloat16):
126
- with torch.inference_mode():
127
- out = model.module.generate(inp,
128
- 1,
129
- temperature=0.9,
130
- return_prime=False,
131
- verbose=False)
132
-
133
- out0 = out[0].tolist()
134
- outy.extend(out0)
135
-
136
- ss1 = out0[0]
137
 
138
  if 0 < ss1 < 256:
139
  ctime += ss1 * 8
140
-
141
  if 256 <= ss1 < 1280:
142
  dur = ((ss1 - 256) // 8) * 32
143
  vel = (((ss1 - 256) % 8) + 1) * 15
144
-
145
  if 1280 <= ss1 < 2816:
146
  channel = (ss1 - 1280) // 128
147
  pitch = (ss1 - 1280) % 128
@@ -155,12 +151,12 @@ def GenerateMIDI(num_tok, idrums, iinstr):
155
 
156
  output[-1].append(event)
157
 
158
- midi_data = TMIDIX.score2midi(output, text_encoding)
159
 
160
  with open(f"Allegro-Music-Transformer-Composition.mid", 'wb') as f:
161
  f.write(midi_data)
162
 
163
- output_plot = TMIDIX.plot_ms_SONG(output[2], plot_title='Allegro-Music-Transformer-Composition', return_plt=True)
164
 
165
  audio = midi_to_colab_audio('Allegro-Music-Transformer-Composition.mid',
166
  soundfont_path="SGM-v2.01-YamahaGrand-Guit-Bass-v2.7.sf2",
@@ -169,9 +165,7 @@ def GenerateMIDI(num_tok, idrums, iinstr):
169
  output_for_gradio=True
170
  )
171
 
172
- print('Sample INTs', outy[:16])
173
- print('-' * 70)
174
- print('Last generated MIDI event', output[2][-1])
175
  print('-' * 70)
176
  print('Req end time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
177
  print('-' * 70)
 
110
  print('Done!')
111
  print('=' * 70)
112
 
113
+
114
+ inp = torch.LongTensor([start_tokens]).cuda()
115
+
116
+ with torch.amp.autocast(device_type='cuda', dtype=torch.bfloat16):
117
+ with torch.inference_mode():
118
+ out = model.module.generate(inp,
119
+ max(1, min(1024, num_tok)),
120
+ temperature=0.9,
121
+ return_prime=False,
122
+ verbose=False)
123
+
124
+ out0 = out[0].tolist()
125
 
126
  ctime = 0
127
  dur = 0
 
129
  pitch = 0
130
  channel = 0
131
 
132
+ for ss1 in out0:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
133
 
134
  if 0 < ss1 < 256:
135
  ctime += ss1 * 8
136
+
137
  if 256 <= ss1 < 1280:
138
  dur = ((ss1 - 256) // 8) * 32
139
  vel = (((ss1 - 256) % 8) + 1) * 15
140
+
141
  if 1280 <= ss1 < 2816:
142
  channel = (ss1 - 1280) // 128
143
  pitch = (ss1 - 1280) % 128
 
151
 
152
  output[-1].append(event)
153
 
154
+ midi_data = TMIDIX.score2midi(output[2], text_encoding)
155
 
156
  with open(f"Allegro-Music-Transformer-Composition.mid", 'wb') as f:
157
  f.write(midi_data)
158
 
159
+ output_plot = TMIDIX.plot_ms_SONG(output, plot_title='Allegro-Music-Transformer-Composition', return_plt=True)
160
 
161
  audio = midi_to_colab_audio('Allegro-Music-Transformer-Composition.mid',
162
  soundfont_path="SGM-v2.01-YamahaGrand-Guit-Bass-v2.7.sf2",
 
165
  output_for_gradio=True
166
  )
167
 
168
+ print('First generated MIDI events', output[2][:3])
 
 
169
  print('-' * 70)
170
  print('Req end time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
171
  print('-' * 70)