import gradio as gr
import os

hf_token = os.environ.get('HF_TOKEN')

# LP-Music-Caps Space, loaded as a callable for audio captioning
lpmc_client = gr.load("seungheondoh/LP-Music-Caps-demo", src="spaces")

from gradio_client import Client

# Llama 2 chat Space, reached through gradio_client (the token keyword is `hf_token`)
client = Client("https://fffiloni-test-llama-api.hf.space/", hf_token=hf_token)

from diffusers import DiffusionPipeline
import torch

# Stable Diffusion XL base, loaded in fp16 for GPU inference
pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
pipe.to("cuda")
#pipe.enable_model_cpu_offload()
# if using torch < 2.0
# pipe.enable_xformers_memory_efficient_attention()
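# On smaller GPUs, pipe.enable_model_cpu_offload() can replace pipe.to("cuda"),
# trading inference speed for lower VRAM usage.
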
from pydub import AudioSegment

def cut_audio(input_path, output_path, max_duration=30000):
    """Trim the audio to at most `max_duration` milliseconds and export it as MP3."""
    audio = AudioSegment.from_file(input_path)

    if len(audio) > max_duration:
        audio = audio[:max_duration]

    audio.export(output_path, format="mp3")
    return output_path
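
# Example (hypothetical filenames): cut_audio("song.wav", "trunc_audio.mp3") returns
# "trunc_audio.mp3", containing at most the first 30 seconds (30000 ms) of the input.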

def solo_xd(prompt):
    # Plain text-to-image path: run the prompt straight through SDXL
    image = pipe(prompt=prompt).images[0]
    return image

def infer(audio_file):
    # Keep only the first 30 seconds of the uploaded audio
    truncated_audio = cut_audio(audio_file, "trunc_audio.mp3")

    # Caption the audio with the LP-Music-Caps Space
    cap_result = lpmc_client(
        truncated_audio,  # str (filepath or URL to file) in 'audio_path' Audio component
        api_name="predict"
    )
    print(cap_result)
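
    # cap_result is the caption text returned by LP-Music-Caps; judging by the
    # prompts in this function, it arrives as a list of per-segment music descriptions.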

    # summarize_q = f"""
    # I'll give you a list of music descriptions. Create a summary reflecting the musical ambiance.
    # Do not process each segment, but provide a summary for the whole instead.
    # Here's the list:
    # {cap_result}
    # """
    # summary_result = client.predict(
    #     summarize_q,  # str in 'Message' Textbox component
    #     api_name="/chat_1"
    # )
    # print(f"SUMMARY: {summary_result}")
    llama_q = f"""
    [INST] <<SYS>>
    I'll give you a music description. Give me an illustrative image description that would fit the music.
    Do not process each segment, but provide a single description for the whole instead.
    <</SYS>>
    Here's the music description:
    {cap_result}