File size: 9,091 Bytes
7453da0
cbbd024
c657020
9738ed3
540056e
7125d26
 
540056e
7125d26
 
 
 
 
 
 
 
 
 
 
540056e
7125d26
5301c9c
7125d26
 
 
 
5301c9c
 
a2c09c5
5301c9c
 
a15b3ce
99bd104
 
c657020
 
5e6dba7
 
 
 
 
 
9738ed3
 
 
55e712f
9738ed3
 
 
 
 
 
 
c657020
a15b3ce
bf18300
a15b3ce
 
bf18300
a15b3ce
 
bf18300
a15b3ce
9738ed3
540056e
cf39162
0a67e21
 
 
 
 
 
 
 
 
396214f
f83630e
396214f
 
d1d7297
396214f
f83630e
396214f
 
 
 
 
 
 
 
 
 
 
 
 
7125d26
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8f90609
319b833
8f90609
 
 
319b833
8f90609
 
 
 
 
396214f
80aa4e5
508045d
 
 
9e731de
 
 
72aa658
508045d
455006b
aef38d7
cbbd024
aef38d7
 
 
 
455006b
 
efc72c4
508045d
fc0768e
508045d
 
 
f83630e
682c6da
f7b9ef5
508045d
80aa4e5
 
 
aef38d7
682c6da
 
7125d26
682c6da
 
 
 
 
396214f
7125d26
 
 
 
 
 
8f90609
 
 
e8fd75c
f83630e
508045d
72aa658
aef38d7
b9bed89
 
 
 
3127104
b9bed89
 
 
 
 
 
 
72aa658
b9bed89
 
776a974
 
 
 
 
 
 
 
8f90609
7125d26
 
 
8f90609
 
7125d26
 
 
72aa658
776a974
5bddbaf
7125d26
cbbd024
5bddbaf
396214f
 
776a974
f83630e
306eb01
 
7125d26
 
 
 
 
976c666
306eb01
 
7125d26
5bddbaf
6a3a19b
306eb01
b9bed89
 
 
 
7125d26
 
b9bed89
 
5bddbaf
b9bed89
 
 
 
7125d26
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
import gradio as gr
import spaces
import json
import re
from gradio_client import Client
from moviepy.editor import VideoFileClip
from moviepy.audio.AudioClip import AudioClip

def extract_audio(video_in):
    """Extract the audio track from a video file and save it as 'audio.wav'.

    Args:
        video_in: Path to the input video file.

    Returns:
        str: Path of the extracted .wav file ('audio.wav' in the CWD).
    """
    output_audio = 'audio.wav'

    # Open the video and grab its audio track; close the clip afterwards
    # so the underlying file handle / ffmpeg reader is released (the
    # original version leaked it).
    video_clip = VideoFileClip(video_in)
    try:
        audio_clip = video_clip.audio
        # 44100 Hz is the standard sample rate for .wav files
        audio_clip.write_audiofile(output_audio, fps=44100)
    finally:
        video_clip.close()
    print("Audio extraction complete.")

    # Return the variable instead of a duplicated literal so the saved
    # path and the returned path cannot drift apart.
    return output_audio



def get_caption(image_in):
    """Caption an image via the hosted Kosmos-2 HF Space.

    Args:
        image_in: Filepath or URL of the image to caption.

    Returns:
        str: The detailed description text, with the leading
        "Describe this image in detail:" prompt stripped when present.
    """
    kosmos2_client = Client("https://ydshieh-kosmos-2.hf.space/")
    kosmos2_result = kosmos2_client.predict(
        image_in,	# str (filepath or URL to image) in 'Test Image' Image component
        "Detailed",	# str in 'Description Type' Radio component
        fn_index=4
    )

    print(f"KOSMOS2 RETURNS: {kosmos2_result}")

    # Second element is a path to a JSON file; each sublist's first item
    # is a text fragment of the caption.
    with open(kosmos2_result[1], 'r') as f:
        data = json.load(f)

    full_sentence = ' '.join(sublist[0] for sublist in data)

    # Strip the echoed instruction prefix, keeping only the description.
    pattern = r'^Describe this image in detail:\s*(.*)$'
    match = re.search(pattern, full_sentence)
    if match:
        description = match.group(1)
        print(description)
    else:
        # BUG FIX: `description` was previously unbound on this path,
        # raising NameError at the return. Fall back to the raw sentence.
        print("Unable to locate valid description.")
        description = full_sentence

    return description

def get_caption_from_MD(image_in):
    """Describe an image using the hosted Moondream1 HF Space.

    Args:
        image_in: Filepath of the image to describe.

    Returns:
        The model's textual answer to the fixed question.
    """
    md_client = Client("https://vikhyatk-moondream1.hf.space/")
    answer = md_client.predict(
        image_in,                          # filepath in 'image' Image component
        "Describe precisely the image.",   # str in 'Question' Textbox component
        api_name="/answer_question"
    )
    print(answer)
    return answer

def get_magnet(prompt):
    """Generate music from a text prompt via the MAGNet HF Space.

    Args:
        prompt: Text description of the desired music.

    Returns:
        The generated audio (second element of the Space's result tuple).
    """
    magnet_client = Client("https://fffiloni-magnet.hf.space/")
    outputs = magnet_client.predict(
        "facebook/magnet-small-10secs",	# model variant ('Model' Radio component)
        "",	# custom model path, unused ('Model Path (custom models)' Textbox)
        prompt,	# text to turn into music ('Input Text' Textbox)
        3,	# temperature
        0.9,	# top-p
        10,	# max CFG coefficient
        1,	# min CFG coefficient
        20,	# decoding steps, stage 1
        10,	# decoding steps, stage 2
        10,	# decoding steps, stage 3
        10,	# decoding steps, stage 4
        "prod-stride1 (new!)",	# span scoring strategy ('Span Scoring' Radio)
        api_name="/predict_full"
    )
    print(outputs)
    # Index 1 holds the generated audio output.
    return outputs[1]

def get_audioldm(prompt):
    """Generate audio from a text prompt via the AudioLDM-2 HF Space.

    The Space returns a video file, so the audio track is extracted
    afterwards with extract_audio().

    Args:
        prompt: Text description of the desired audio.

    Returns:
        str: Path to the extracted .wav file.
    """
    audioldm_client = Client("https://haoheliu-audioldm2-text2audio-text2music.hf.space/")
    raw_result = audioldm_client.predict(
        prompt,	# str in 'Input text' Textbox component
        "Low quality.",	# str in 'Negative prompt' Textbox component
        10,	# duration in seconds (5–15)
        3.5,	# guidance scale (0–7)
        45,	# seed
        3,	# number of waveforms to generate (1–5)
        fn_index=1
    )
    print(raw_result)
    # The Space output is a video container; pull out the audio track.
    return extract_audio(raw_result)

def get_riffusion(prompt):
    """Generate music from a text prompt via the Riffusion HF Space.

    Args:
        prompt: Musical text prompt.

    Returns:
        The generated audio (second element of the Space's result tuple).
    """
    riffusion_client = Client("https://fffiloni-spectrogram-to-music.hf.space/")
    outputs = riffusion_client.predict(
        prompt,	# str in 'Musical prompt' Textbox component
        "",	# str in 'Negative prompt' Textbox component
        None,	# filepath in 'parameter_4' Audio component (unused)
        10,	# duration in seconds (5–10)
        api_name="/predict"
    )
    print(outputs)
    # Index 1 holds the generated audio output.
    return outputs[1]
    
# NOTE(review): `re` is already imported at the top of the file; this
# repeated import is redundant (harmless, but removable).
import re
import torch
from transformers import pipeline

# Model ids for the instruction-following LLM used to write musical prompts.
zephyr_model = "HuggingFaceH4/zephyr-7b-beta"
# NOTE(review): defined but never used below — kept as a candidate alternative.
mixtral_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"

# Load Zephyr once at module import; bfloat16 halves memory vs. float32 and
# device_map="auto" places the model on whatever accelerator is available.
pipe = pipeline("text-generation", model=zephyr_model, torch_dtype=torch.bfloat16, device_map="auto")

agent_maker_sys = f"""
You are an AI whose job is to help users create their own music which its genre will reflect the character or scene from an image described by users.
In particular, you need to respond succintly with few musical words, in a friendly tone, write a musical prompt for a music generation model.

For example, if a user says, "a picture of a man in a black suit and tie riding a black dragon", provide immediately a musical prompt corresponding to the image description. 
Immediately STOP after that. It should be EXACTLY in this format:
"A grand orchestral arrangement with thunderous percussion, epic brass fanfares, and soaring strings, creating a cinematic atmosphere fit for a heroic battle"
"""

instruction = f"""
<|system|>
{agent_maker_sys}</s>
<|user|>
"""

@spaces.GPU(enable_queue=True)
def get_musical_prompt(user_prompt):
    """Turn an image caption into a short musical prompt with Zephyr.

    Args:
        user_prompt: The image caption to base the music on.

    Returns:
        str: The model's suggested musical prompt, with the echoed chat
        template stripped and leading newlines removed.
    """
    chat_prompt = f"{instruction.strip()}\n{user_prompt}</s>"
    generation = pipe(
        chat_prompt,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
        top_k=50,
        top_p=0.95,
    )
    # Drop everything from the <|system|> tag through the <|assistant|> tag,
    # keeping only the assistant's reply.
    template_span = r'\<\|system\|\>(.*?)\<\|assistant\|\>'
    cleaned_text = re.sub(template_span, '', generation[0]["generated_text"], flags=re.DOTALL)

    print(f"SUGGESTED Musical prompt: {cleaned_text}")
    return cleaned_text.lstrip("\n")

def infer(image_in, chosen_model):
    """Full pipeline: caption the image, write a musical prompt, make music.

    Args:
        image_in: Filepath of the reference image.
        chosen_model: One of "MAGNet", "AudioLDM-2", "Riffusion".

    Returns:
        tuple: (musical_prompt, music_o) — the generated prompt text and
        the resulting audio.

    Raises:
        gr.Error: If chosen_model is not one of the supported options.
    """
    gr.Info("Getting image caption with Kosmos2...")
    user_prompt = get_caption(image_in)
    
    gr.Info("Building a musical prompt according to the image caption ...")
    musical_prompt = get_musical_prompt(user_prompt)

    if chosen_model == "MAGNet" :
        gr.Info("Now calling MAGNet for music...")
        music_o = get_magnet(musical_prompt)
    elif chosen_model == "AudioLDM-2" :
        gr.Info("Now calling AudioLDM-2 for music...")
        # BUG FIX: this branch previously called get_magnet(), so choosing
        # AudioLDM-2 silently generated with MAGNet instead.
        music_o = get_audioldm(musical_prompt)
    elif chosen_model == "Riffusion" :
        gr.Info("Now calling Riffusion for music...")
        music_o = get_riffusion(musical_prompt)
    else:
        # Previously an unknown choice fell through and left music_o
        # unbound, raising NameError at the return below.
        raise gr.Error(f"Unknown model choice: {chosen_model}")
    
    return musical_prompt, music_o

# UI copy shown in the demo header.
demo_title = "Image to Music V2"
description = "Get music from a picture"

# Center the main column and cap its width.
css = """
#col-container{
    margin: 0 auto;
    max-width: 980px;
    text-align: left;
}
"""

# Gradio UI: image + model choice on the left, prompt + audio on the right,
# with clickable examples underneath. Wiring is declarative; keep the
# component variables in sync with infer()'s inputs/outputs.
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        # Header: title and one-line description.
        gr.HTML(f"""
        <h2 style="text-align: center;">{demo_title}</h2>
        <p style="text-align: center;">{description}</p>
        """)
        
        with gr.Row():
            with gr.Column():
                # Input image; type="filepath" matches what infer() expects.
                image_in = gr.Image(
                    label = "Image reference",
                    type = "filepath",
                    elem_id = "image-in"
                )
                # Music backend selector; values must match infer()'s branches.
                chosen_model = gr.Dropdown(
                    label = "Choose a model",
                    choices = [
                        "MAGNet",
                        "AudioLDM-2",
                        "Riffusion"
                    ],
                    value = "MAGNet"
                )
                submit_btn = gr.Button("Make music from my pic !")
            with gr.Column():
                # Outputs: the LLM-written musical prompt and the audio result.
                caption = gr.Textbox(
                    label = "Inspirational musical prompt",
                    max_lines = 3
                )
                result = gr.Audio(
                    label = "Music"
                )
        with gr.Column():
            # Example images; not cached because each run hits remote Spaces.
            gr.Examples(
                examples = [
                    ["examples/monalisa.png", "MAGNet"],
                    ["examples/santa.png", "MAGNet"],
                    ["examples/ocean_poet.jpeg", "MAGNet"],
                    ["examples/winter_hiking.png", "MAGNet"],
                    ["examples/teatime.jpeg", "MAGNet"],
                    ["examples/news_experts.jpeg", "MAGNet"]
                ],
                fn = infer,
                inputs = [image_in, chosen_model],
                outputs = [caption, result],
                cache_examples = False
            )

    # Main action: run the full pipeline on click.
    submit_btn.click(
        fn = infer,
        inputs = [
            image_in,
            chosen_model
        ],
        outputs =[
            caption,
            result
        ]
    )

# Queue requests (bounded backlog) and launch without exposing the API page.
demo.queue(max_size=16).launch(show_api=False)