sadafwalliyani committed
Commit 2fc58bd
1 Parent(s): 8fa7a29

Update app.py

Files changed (1): app.py +47 -39
app.py CHANGED
@@ -1,20 +1,26 @@
 import streamlit as st
 import torch
 import torchaudio
-from audiocraft.models import MusicGen
 import os
 import numpy as np
 import base64
+from audiocraft.models import MusicGen
+
+# Before
+batch_size = 64
 
-genres = ["Pop","Hip-Hop", "Classical","Lofi", "Chillpop","Country","R&G", "Folk","EDM", "Disco", "House", "Techno",]
+# After
+batch_size = 32
+torch.cuda.empty_cache()
+
+genres = ["Pop", "Rock", "Jazz", "Electronic", "Hip-Hop", "Classical", "Lofi", "Chillpop"]
 
 @st.cache_resource()
 def load_model():
     model = MusicGen.get_pretrained('facebook/musicgen-small')
     return model
-
 
-def generate_music_tensors(description, duration: int, batch_size=1):
+def generate_music_tensors(description, duration: int):
     model = load_model()
 
     model.set_generation_params(
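
The first hunk ends inside model.set_generation_params(, so the rest of generate_music_tensors is not visible here. As a rough sketch of how the cached loader and the generation call typically fit together in the audiocraft API (the keyword values below are illustrative assumptions, not taken from this commit):

import streamlit as st
from audiocraft.models import MusicGen

@st.cache_resource()
def load_model():
    # Loaded once per Streamlit process and reused across reruns.
    return MusicGen.get_pretrained('facebook/musicgen-small')

def generate_music_tensors(description: str, duration: int):
    model = load_model()
    model.set_generation_params(
        use_sampling=True,  # illustrative settings; the commit's values are not shown
        top_k=250,
        duration=duration,
    )
    # generate() takes a list of text prompts and returns a tensor shaped
    # [batch, channels, samples] at model.sample_rate.
    output = model.generate(descriptions=[description], progress=True)
    st.success("Music Generation Complete!")
    return output
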
@@ -32,9 +38,8 @@ def generate_music_tensors(description, duration: int, batch_size=1):
 
     st.success("Music Generation Complete!")
     return output
-
 
-def save_audio(samples: torch.Tensor, filename):
+def save_audio(samples: torch.Tensor):
     sample_rate = 30000
     save_path = "audio_output"
     assert samples.dim() == 2 or samples.dim() == 3
@@ -44,9 +49,8 @@ def save_audio(samples: torch.Tensor, filename):
         samples = samples[None, ...]
 
     for idx, audio in enumerate(samples):
-        audio_path = os.path.join(save_path, f"{filename}_{idx}.wav")
+        audio_path = os.path.join(save_path, f"audio_{idx}.wav")
         torchaudio.save(audio_path, audio, sample_rate)
-    return audio_path
 
 def get_binary_file_downloader_html(bin_file, file_label='File'):
     with open(bin_file, 'rb') as f:
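
save_audio above still hard-codes sample_rate = 30000, while musicgen-small decodes audio at 32 kHz (exposed as model.sample_rate), so a file written at 30000 plays back slightly slowed and detuned. A minimal sketch of the same helper with the rate passed in by the caller, assuming the tensors come from model.generate() as in the previous hunk:

import os
import torch
import torchaudio

def save_audio(samples: torch.Tensor, sample_rate: int, save_path: str = "audio_output"):
    # Accept a single clip [channels, samples] or a batch [batch, channels, samples].
    assert samples.dim() == 2 or samples.dim() == 3
    os.makedirs(save_path, exist_ok=True)
    if samples.dim() == 2:
        samples = samples[None, ...]
    for idx, audio in enumerate(samples):
        audio_path = os.path.join(save_path, f"audio_{idx}.wav")
        # torchaudio.save expects a CPU tensor shaped [channels, frames].
        torchaudio.save(audio_path, audio.detach().cpu(), sample_rate)

A caller holding the loaded MusicGen instance would then invoke it as save_audio(music_tensor, model.sample_rate).
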
@@ -61,52 +65,56 @@ st.set_page_config(
 )
 
 def main():
-    st.title(" 🎶 AI Composer Small-Model 🎶")
+    st.title("🎧 AI Composer Medium-Model 🎧")
 
     st.subheader("Craft your perfect melody!")
-
     bpm = st.number_input("Enter Speed in BPM", min_value=60)
-    text_area = st.text_area('Example: 80s rock song with guitar and drums', height=50)
-    selected_genre = st.selectbox("Select Genre (Optional)", genres, None)
-    time_slider = st.slider("Select time duration (In Seconds)", 0, 60, 10)
-
-    mood = st.selectbox("Select Mood (Optional)", ["Happy", "Sad", "Angry", "Relaxed", "Energetic"], None)
-    instrument = st.selectbox("Select Instrument (Optional)", ["Piano", "Guitar", "Flute", "Violin", "Drums"], None)
-    tempo = st.selectbox("Select Tempo (Optional)", ["Slow", "Moderate", "Fast"], None)
-    melody = st.text_input("Enter Melody or Chord Progression (Optional)", "e.g: C D:min G:7 C, Twinkle Twinkle Little Star")
+
+    text_area = st.text_area('Ex : 80s rock song with guitar and drums')
+    st.text('')
+    # Dropdown for genres
+    selected_genre = st.selectbox("Select Genre", genres)
+
+    st.subheader("2. Select time duration (In Seconds)")
+    time_slider = st.slider("Select time duration (In Seconds)", 0, 30, 10)
+    # mood = st.selectbox("Select Mood (Optional)", ["Happy", "Sad", "Angry", "Relaxed", "Energetic"], None)
+    # instrument = st.selectbox("Select Instrument (Optional)", ["Piano", "Guitar", "Flute", "Violin", "Drums"], None)
+    # tempo = st.selectbox("Select Tempo (Optional)", ["Slow", "Moderate", "Fast"], None)
+    # melody = st.text_input("Enter Melody or Chord Progression (Optional)", "e.g: C D:min G:7 C, Twinkle Twinkle Little Star")
 
     if st.button('Let\'s Generate 🎶'):
         st.text('\n\n')
         st.subheader("Generated Music")
-
-        description = f"{text_area}"
+
+        # Generate audio
+        description = text_area  # Initialize description with text_area
         if selected_genre:
             description += f" {selected_genre}"
+            st.empty()  # Hide the selected_genre selectbox after selecting one option
         if bpm:
             description += f" {bpm} BPM"
-        if mood:
-            description += f" {mood}"
-        if instrument:
-            description += f" {instrument}"
-        if tempo:
-            description += f" {tempo}"
-        if melody:
-            description += f" {melody}"
+        # if mood:
+        #     description += f" {mood}"
+        #     st.empty()  # Hide the mood selectbox after selecting one option
+        # if instrument:
+        #     description += f" {instrument}"
+        #     st.empty()  # Hide the instrument selectbox after selecting one option
+        # if tempo:
+        #     description += f" {tempo}"
+        #     st.empty()  # Hide the tempo selectbox after selecting one option
+        # if melody:
+        #     description += f" {melody}"
+
+        # Clear CUDA memory cache before generating music
+        torch.cuda.empty_cache()
 
         music_tensors = generate_music_tensors(description, time_slider)
 
+        # Only play the full audio for index 0
         idx = 0
-
-        # audio_path = save_audio(music_tensors[idx], "audio_output")
-        # audio_file = open(audio_path, 'rb')
-        # audio_bytes = audio_file.read()
-
-        # st.audio(audio_bytes, format='audio/wav')
-        # st.markdown(get_binary_file_downloader_html(audio_path, f'Audio_{idx}'), unsafe_allow_html=True)
-
         music_tensor = music_tensors[idx]
-        save_music_file = save_audio(music_tensor)
-        audio_filepath = f'audio_output/audio_{idx}.wav'
+        save_audio(music_tensor)
+        audio_filepath = f'/audio_output/audio_{idx}.wav'
         audio_file = open(audio_filepath, 'rb')
         audio_bytes = audio_file.read()
 
@@ -115,4 +123,4 @@ def main():
         st.markdown(get_binary_file_downloader_html(audio_filepath, f'Audio_{idx}'), unsafe_allow_html=True)
 
 if __name__ == "__main__":
-    main()
+    main()
 
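
The diff shows only the signature of get_binary_file_downloader_html. A helper like this usually base64-encodes the saved .wav and returns an <a> tag with a data URI, which is why main() renders it through st.markdown(..., unsafe_allow_html=True); the sketch below is that common pattern, not the commit's exact body.

import base64
import os

def get_binary_file_downloader_html(bin_file, file_label='File'):
    # Embed the file as a base64 data URI so the browser can download it
    # straight from the HTML rendered by st.markdown.
    with open(bin_file, 'rb') as f:
        data = f.read()
    b64 = base64.b64encode(data).decode()
    return (
        f'<a href="data:application/octet-stream;base64,{b64}" '
        f'download="{os.path.basename(bin_file)}">Download {file_label}</a>'
    )

Streamlit's built-in st.download_button covers the same need without raw HTML. Note also that the new main() writes through save_audio into the relative audio_output/ directory but reads back from the absolute path /audio_output/audio_{idx}.wav, so both paths have to resolve to the same location for playback and the download link to work.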