
Dance Diffusion is now available in 🧨 Diffusers.

FP32

# !pip install diffusers[torch] accelerate scipy
from diffusers import DiffusionPipeline
from scipy.io.wavfile import write

model_id = "harmonai/unlocked-250k"
pipe = DiffusionPipeline.from_pretrained(model_id)
pipe = pipe.to("cuda")

audios = pipe(audio_length_in_s=4.0).audios

# To save locally
for i, audio in enumerate(audios):
    write(f"test_{i}.wav", pipe.unet.sample_rate, audio.transpose())
    
# To display in Google Colab
import IPython.display as ipd
for audio in audios:
    display(ipd.Audio(audio, rate=pipe.unet.sample_rate))
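The call above only sets audio_length_in_s. The pipeline call also accepts batch_size, num_inference_steps, and a generator, which can be used for several clips per call, a different number of denoising steps, or reproducible sampling. A minimal sketch continuing from the code above (the values are illustrative, not recommendations):

import torch

# Fix the random seed so repeated runs produce the same clips
generator = torch.Generator(device="cuda").manual_seed(0)

audios = pipe(
    batch_size=2,              # number of clips generated in one call
    audio_length_in_s=8.0,     # clip length in seconds
    num_inference_steps=100,   # denoising steps (the pipeline default)
    generator=generator,
).audios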

FP16

Faster, with a small loss in quality

# !pip install diffusers[torch] accelerate scipy
from diffusers import DiffusionPipeline
from scipy.io.wavfile import write
import torch

model_id = "harmonai/unlocked-250k"
pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")

audios = pipe(audio_length_in_s=4.0).audios

# To save locally
for i, audio in enumerate(audios):
    write(f"{i}.wav", pipe.unet.sample_rate, audio.transpose())
    
# To display in Google Colab
import IPython.display as ipd
for audio in audios:
    display(ipd.Audio(audio, rate=pipe.unet.sample_rate))
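scipy.io.wavfile.write stores these float arrays as 32-bit float WAV files. If a player or downstream tool expects 16-bit PCM instead, the clips can be converted before writing. A short sketch (the scaling/clipping and the file names are a common convention, not part of this model card):

import numpy as np
from scipy.io.wavfile import write

for i, audio in enumerate(audios):
    pcm = np.clip(audio.transpose(), -1.0, 1.0)   # keep samples in [-1, 1]
    pcm = (pcm * 32767).astype(np.int16)          # convert to 16-bit PCM
    write(f"test_pcm_{i}.wav", pipe.unet.sample_rate, pcm)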