AlekseyKorshuk committed on
Commit
95addfe
1 Parent(s): 1f285f8

Update app.py

Files changed (1)
  1. app.py +44 -28
app.py CHANGED
@@ -1,48 +1,64 @@
-import gradio as gr
+import streamlit as st
 import numpy as np
 import pretty_midi
 from accompaniment_generator.generator.base import Generator
 import os
 import uuid
-from midi2audio import FluidSynth
 import time
+from midi2audio import FluidSynth
 from scipy.io import wavfile
 
 
 def inference(audio, num_epoch):
     generator = Generator()
-    input_midi_data = pretty_midi.PrettyMIDI(audio.name)
-    output_midi_data = generator(audio.name, num_epoch=int(num_epoch))
+    output_midi_data = generator(audio, num_epoch=int(num_epoch))
     name = uuid.uuid4()
     output_midi_data.write(f'{name}.mid')
     fs = FluidSynth("font.sf2")
     fs.midi_to_audio(f'{name}.mid', f'{name}.wav')
-    fs.midi_to_audio(audio.name, f'{name}-init.wav')
-    time.sleep(2)
-    print([
-        wavfile.read(f'{name}-init.wav'),
-        wavfile.read(f'{name}.wav'),
-    ])
-    return [
-        wavfile.read(f'{name}-init.wav'),
-        wavfile.read(f'{name}.wav'),
-    ]
-
-title = "Accompaniment Generator"
-description = "Gradio demo for MIDI-DDSP: Detailed Control of Musical Performance via Hierarchical Modeling. To use it, simply upload your midi file, or click one of the examples to load them. Read more at the links below."
+    fs.midi_to_audio(audio, f'{name}-init.wav')
+    # time.sleep(2)
+    print([f'{name}-init.wav', f'{name}.wav'])
+    return f'{name}-init.wav', f'{name}.wav'
+
+
+st.title("Accompaniment Generator")
+st.description = "Gradio demo for MIDI-DDSP: Detailed Control of Musical Performance via Hierarchical Modeling. To use it, simply upload your midi file, or click one of the examples to load them. Read more at the links below."
 
 article = "<p style='text-align: center'>" \
           "<a href='https://github.com/AlekseyKorshuk/accompaniment-generator' target='_blank'>Github Repo</a>" \
           "</p>"
 
-examples = [['barbiegirl_mono.mid', 10]]
-
-gr.Interface(
-    inference,
-    [gr.inputs.File(type="file", label="Input"), gr.inputs.Number(label="Number of epoch", default=10)],
-    [gr.outputs.Audio(type="auto", label="Before"), gr.outputs.Audio(type="auto", label="After")],
-    title=title,
-    description=description,
-    article=article,
-    examples=examples,
-).launch(debug=True)
+from os import listdir
+from os.path import isfile, join
+
+onlyfiles = [f for f in listdir("./examples") if isfile(join("./examples", f))]
+
+model_name = st.selectbox(
+    'Select example MIDI file:',
+    onlyfiles
+)
+
+uploaded_file = st.file_uploader(
+    'Upload MIDI file:'
+)
+
+num_epoch = st.number_input("Number of epochs:",
+                            min_value=1,
+                            max_value=1000,
+                            step=1,
+                            value=10,
+                            )
+
+generate_image_button = st.button("Generate")
+
+if generate_image_button:
+    input_file = f"./examples/{model_name}"
+    if uploaded_file is not None:
+        input_file = uploaded_file.name
+    with st.spinner(text=f"Generating, this may take some time..."):
+        before, after = inference(input_file, num_epoch)
+        st.markdown("Before:")
+        st.audio(before)
+        st.markdown("After:")
+        st.audio(after)
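
Note: after this change the app is a Streamlit script, so it is started with "streamlit run app.py" rather than through gr.Interface(...).launch(). Also, st.file_uploader returns an in-memory UploadedFile whose .name attribute is only the original filename, not a path on disk, so passing it to the path-based inference() may only work when the file already exists locally. A minimal sketch of how a follow-up change could persist the upload before calling inference() (hypothetical, not part of this commit):

import tempfile

import streamlit as st

uploaded_file = st.file_uploader('Upload MIDI file:')

input_file = None
if uploaded_file is not None:
    # UploadedFile is a memory buffer: write its bytes to a real temporary file
    # so path-based tools (pretty_midi, FluidSynth, Generator) can open it.
    with tempfile.NamedTemporaryFile(suffix=".mid", delete=False) as tmp:
        tmp.write(uploaded_file.getvalue())
        input_file = tmp.name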