mcaaroni committed on
Commit
c5b9912
1 Parent(s): 7227afd

feat: implement first version of space for HARP

Browse files
Files changed (3) hide show
  1. .gitignore +3 -0
  2. app.py +90 -0
  3. requirements.txt +4 -0
.gitignore ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ *__pycache__
2
+ _outputs
3
+ src
app.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from audiocraft.data.audio_utils import normalize_audio
3
+ from audiocraft.models import MusicGen
4
+ from audiotools import AudioSignal
5
+
6
+ from pyharp import ModelCard, build_endpoint, save_and_return_filepath
7
+
8
+ card = ModelCard(
9
+ name='Micro Musicgen Jungle',
10
+ description="The jungle version of the micro-musicgen model series. Use a prompt duration of 0 to generate unconditional audio. (WHICH WORKS BETTER) Outpainting is not really tested by me, I just thought it would be cool to have it here because you work with input audio.\n\n HAVE FUNNNNNNNNN",
11
+ author='Aaron Abebe',
12
+ tags=['musicgen', 'jungle', 'micro-musicgen', 'unconditional', 'generation']
13
+ )
14
+
15
+ model = MusicGen.get_pretrained("pharoAIsanders420/micro-musicgen-jungle")
16
+
17
+
18
def process_fn(input_audio_path, gen_duration, prompt_duration):
    """
    Generate audio by sampling from the micro-musicgen-jungle model.

    Supports both unconditional generation (prompt_duration of 0 or None,
    in which case the input audio is ignored and need not exist) and
    continuation of the tail of the input audio.

    Args:
        input_audio_path (str): the audio filepath to be processed.
        gen_duration (int): the duration, in seconds, of the generated audio.
        prompt_duration (int): the duration, in seconds, of the input
            conditioning audio; 0 or None selects unconditional generation.

    Returns:
        output_audio_path (str): the filepath of the generated audio.

    Raises:
        ValueError: if the input audio is shorter than prompt_duration.
    """
    model.set_generation_params(
        duration=gen_duration,
        temperature=1.05,
        cfg_coef=3,
    )

    if prompt_duration is None or prompt_duration == 0:
        # Unconditional path: do NOT touch input_audio_path here — the UI
        # allows generating with no input file at all, and loading it
        # unconditionally (as the original code did) crashed in that case.
        output = model.generate_unconditional(1)
    else:
        sig = AudioSignal(input_audio_path)
        y, sr = sig.audio_data[0], sig.sample_rate

        num_samples = int(prompt_duration * sr)
        if y.shape[1] < num_samples:
            raise ValueError("The existing audio is too short for the specified prompt duration.")

        # Condition on the last `prompt_duration` seconds of the input.
        prompt_waveform = y[..., -num_samples:]

        output = model.generate_continuation(prompt_waveform, prompt_sample_rate=sr)

    output = normalize_audio(
        output,
        sample_rate=model.sample_rate,  # musicgen outputs at 32kHz
        strategy="loudness",
        loudness_headroom_db=10,
        loudness_compressor=True,
    )
    # Wrap the generated audio in a signal at the MODEL's sample rate.
    # Reusing the input signal (as before) kept the input file's sample
    # rate, so the saved output played at the wrong speed whenever the
    # input was not 32 kHz.
    out_sig = AudioSignal(output.cpu(), sample_rate=model.sample_rate)
    return save_and_return_filepath(out_sig)
62
+
63
+
64
with gr.Blocks() as demo:
    # HARP widgets: an (optional) audio input plus two sliders controlling
    # how much audio to generate and how much input to condition on.
    audio_in = gr.Audio(
        label="Ignore Me: I only generate, I don't consume",
        type='filepath'
    )
    gen_duration = gr.Slider(
        minimum=10,
        maximum=30,
        step=1,
        value=10,
        label="Generation Duration"
    )
    prompt_duration = gr.Slider(
        minimum=0,
        maximum=10,
        step=1,
        value=2,
        label="Input Conditioning Duration"
    )
    inputs = [audio_in, gen_duration, prompt_duration]

    output = gr.Audio(label='Audio Output', type='filepath')
    # Wire the widgets to process_fn as a pyharp/HARP endpoint.
    widgets = build_endpoint(inputs, output, process_fn, card)

demo.queue()
demo.launch(share=True)
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ -e git+https://github.com/audacitorch/pyharp.git#egg=pyharp
2
+ torch==2.1.0
3
+ descript-audiotools
4
+ audiocraft @ git+https://github.com/aaronabebe/audiocraft.git