juancopi81 committed
Commit: a322e01
Parent: 87e11f4

Add ffmpeg

Files changed (3)
  1. Dockerfile +1 -1
  2. main.py +26 -14
  3. utils.py +3 -9
Dockerfile CHANGED
```diff
@@ -9,7 +9,7 @@ RUN DEBIAN_FRONTEND="noninteractive" apt-get -qq update && \
     DEBIAN_FRONTEND="noninteractive" apt-get install -y tzdata
 
 RUN apt-get update -qq && \
-    apt-get install -qq python3-pip build-essential libasound2-dev libjack-dev wget cmake pkg-config libglib2.0-dev
+    apt-get install -qq python3-pip build-essential libasound2-dev libjack-dev wget cmake pkg-config libglib2.0-dev ffmpeg
 
 # Download libfluidsynth source
 RUN wget https://github.com/FluidSynth/fluidsynth/archive/refs/tags/v2.3.3.tar.gz && \
```
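This hunk only installs the `ffmpeg` package; the code that calls it is not part of the commit. A common reason a music-generation Space needs ffmpeg is to convert the WAV rendered by FluidSynth into a browser-friendly format. The snippet below is a minimal, hypothetical sketch of such a conversion using `pydub` (which shells out to the ffmpeg binary); the repository may rely on a different consumer of ffmpeg.

```python
# Hypothetical sketch: pydub delegates encoding to the ffmpeg binary that this
# Dockerfile change installs. Without ffmpeg on PATH, export() below fails.
from pydub import AudioSegment


def wav_to_mp3(wav_path: str, mp3_path: str) -> str:
    """Convert a rendered WAV file to MP3 (requires ffmpeg)."""
    audio = AudioSegment.from_wav(wav_path)  # read the synthesized WAV
    audio.export(mp3_path, format="mp3")     # ffmpeg does the actual encoding
    return mp3_path
```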
main.py CHANGED
```diff
@@ -17,20 +17,32 @@ DESCRIPTION = """
 # 🎵 Multitrack Midi Generator 🎶
 This interactive application uses an AI model to generate music sequences based on a chosen genre and various user inputs.
 
-## Features:
-- 🎼 Select the genre for the music.
-- 🌡️ Use the "Temperature" slider to adjust the randomness of the music generated (higher values will produce more random outputs).
-- ⏱️ Adjust the "Tempo" slider to change the speed of the music.
-- 🎹 Use the buttons to generate a new song from scratch, continue generation with the current settings, remove the last added instrument, regenerate the last added instrument with a new one, or change the tempo of the current song.
-
-## Outputs:
-The app outputs the following:
-
-- 🎧 The audio of the generated song.
-- 📁 A MIDI file of the song.
-- 📊 A plot of the song's sequence.
-- 🎸 A list of the generated instruments.
-- 📝 The text sequence of the song.
+<table>
+<tr>
+<td valign="top">
+
+## Features:
+- 🎼 Select the genre for the music.
+- 🌡️ Use the "Temperature" slider to adjust the randomness of the music generated (higher values will produce more random outputs).
+- ⏱️ Adjust the "Tempo" slider to change the speed of the music.
+- 🎹 Use the buttons to generate a new song from scratch, continue generation with the current settings, remove the last added instrument, regenerate the last added instrument with a new one, or change the tempo of the current song.
+
+</td>
+<td valign="top">
+
+## Outputs:
+The app outputs the following:
+
+- 🎧 The audio of the generated song.
+- 📁 A MIDI file of the song.
+- 📊 A plot of the song's sequence.
+- 🎸 A list of the generated instruments.
+- 📝 The text sequence of the song.
+
+</td>
+</tr>
+</table>
+
 Enjoy creating your own AI-generated music! 🎵
 """
 
```
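The change replaces the flat Features/Outputs lists with an inline HTML table so they render as two columns. How `DESCRIPTION` is displayed is not part of this diff; a minimal sketch, assuming the common Gradio pattern of rendering such a string with `gr.Markdown` (which passes the embedded HTML through to the browser), is:

```python
# Minimal sketch (assumed, not taken from this commit): rendering DESCRIPTION
# in a Gradio Blocks app. gr.Markdown renders the headings and bullet lists as
# Markdown and leaves the embedded <table> markup intact, giving two columns.
import gradio as gr

DESCRIPTION = "..."  # the Markdown/HTML string edited above

with gr.Blocks() as demo:
    gr.Markdown(DESCRIPTION)

if __name__ == "__main__":
    demo.launch()
```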
utils.py CHANGED
```diff
@@ -16,9 +16,7 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 # Load the tokenizer and the model
 tokenizer = AutoTokenizer.from_pretrained("juancopi81/lmd_8bars_tokenizer")
-model = AutoModelForCausalLM.from_pretrained(
-    "juancopi81/lmd-8bars-2048-epochs20_v3"
-)
+model = AutoModelForCausalLM.from_pretrained("juancopi81/lmd-8bars-2048-epochs20_v3")
 
 # Move model to device
 model = model.to(device)
@@ -60,9 +58,7 @@ def get_instruments(text_sequence: str) -> List[str]:
     return instruments
 
 
-def generate_new_instrument(
-    seed: str, temp: float = 0.75
-) -> str:
+def generate_new_instrument(seed: str, temp: float = 0.75) -> str:
     """
     Generates a new instrument sequence from a given seed and temperature.
 
@@ -242,9 +238,7 @@ def generate_song(
     else:
         seed_string = text_sequence
 
-    generated_sequence = generate_new_instrument(
-        seed=seed_string, temp=temp
-    )
+    generated_sequence = generate_new_instrument(seed=seed_string, temp=temp)
     audio, midi_file, fig, instruments_str, num_tokens = get_outputs_from_string(
         generated_sequence, qpm
     )
```
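All three hunks are pure reflows: the model load, the `generate_new_instrument` signature, and its call site are collapsed onto single lines with no behavioral change. For context, the tokenizer/model pair loaded here is a standard `transformers` causal-LM setup; the sketch below shows, under that assumption, how a seed string and temperature are typically turned into a generated token sequence. The real body of `generate_new_instrument` is not shown in this diff, and `sample_sequence` is a hypothetical name.

```python
# Hypothetical sketch of temperature sampling with the objects loaded in
# utils.py; the actual generate_new_instrument body is unchanged by this
# commit and not reproduced here.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = AutoTokenizer.from_pretrained("juancopi81/lmd_8bars_tokenizer")
model = AutoModelForCausalLM.from_pretrained(
    "juancopi81/lmd-8bars-2048-epochs20_v3"
).to(device)


def sample_sequence(seed: str, temp: float = 0.75, max_new_tokens: int = 512) -> str:
    """Sample a continuation of `seed`; higher temp gives more random output."""
    inputs = tokenizer(seed, return_tensors="pt").to(device)
    output_ids = model.generate(
        **inputs,
        do_sample=True,        # enable sampling so temperature has an effect
        temperature=temp,
        max_new_tokens=max_new_tokens,
    )
    return tokenizer.decode(output_ids[0])
```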