"""https://github.com/facebookresearch/audiocraft/blob/main/audiocraft/models/multibanddiffusion.py"""
import logging
from typing import List, Optional, Tuple
from math import ceil
import torch
import julius
from tqdm import tqdm
from audiocraft.modules.diffusion_schedule import NoiseSchedule
from audiocraft.models.unet import DiffusionUnet
from audiocraft.models.encodec import CompressionModel
from audiocraft.models.loaders import load_diffusion_models
from audiocraft.solvers.compression import CompressionSolver
from df.enhance import enhance, init_df  # DeepFilterNet speech enhancement
class DFEnhancer:
"""Speech enhancer."""
def __init__(self):
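        # init_df() loads the default DeepFilterNet model together with its DF state,
        # which exposes the model's native sample rate.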
self.model, self.df_state, _ = init_df()
self.sample_rate = self.df_state.sr()
def enhance_audio(self, audio: torch.Tensor, sample_rate: int) -> torch.Tensor:
if sample_rate != self.sample_rate:
audio = julius.resample_frac(audio, sample_rate, self.sample_rate)
enhanced_audio = []
for single_audio in audio:
enhanced_audio.append(enhance(self.model, self.df_state, single_audio))
return torch.stack(enhanced_audio)
class DiffusionProcess:
"""Sampling for a diffusion Model.
Args:
model (DiffusionUnet): Diffusion U-Net model.
noise_schedule (NoiseSchedule): Noise schedule for diffusion process.
"""
def __init__(self, model: DiffusionUnet, noise_schedule: NoiseSchedule) -> None:
self.model = model
self.schedule = noise_schedule
def generate(self, condition: torch.Tensor, initial_noise: torch.Tensor, step_size: int = 5) -> torch.Tensor:
"""Perform one diffusion process to generate one of the bands.
Args:
condition (torch.Tensor): The embeddings from the compression model.
initial_noise (torch.Tensor): The initial noise to start the process.
            step_size (int): Number of linearly spaced Markov chain steps.
"""
step_list = list(range(1000))[::-int(1000/step_size)] + [0]
return self.schedule.generate_subsampled(
model=self.model, initial=initial_noise, step_list=step_list, condition=condition
)
class BaseMultiBandDiffusion:
def __init__(self,
diffusion_processes: List[DiffusionProcess],
codec_model: CompressionModel,
sample_per_token: int = 320,
num_codebooks_decoder: int = 3,
num_codebooks_encoder: Optional[int] = None) -> None:
"""Base class for multi-band diffusion.
Args:
diffusion_processes (list of DiffusionProcess): Diffusion processes.
codec_model (CompressionModel): Underlying compression model used to obtain discrete tokens.
            sample_per_token (int): Number of samples per token (320 for the 24 kHz EnCodec).
            num_codebooks_decoder (int): Number of codebooks to use for the decoder.
            num_codebooks_encoder (int): Number of codebooks to use for the encoder (defaults to all codebooks).
"""
self.diffusion_processes = diffusion_processes
self.codec_model = codec_model
self.device = next(self.codec_model.parameters()).device
self.sample_per_token = sample_per_token
self.num_codebooks_decoder = num_codebooks_decoder
self.num_codebooks_encoder = num_codebooks_encoder
self.enhancer = DFEnhancer()
@property
def sample_rate(self) -> int:
return self.codec_model.sample_rate
def generate(self, emb: torch.Tensor, size: torch.Size, step_size: int = 5) -> torch.Tensor:
"""Generate waveform audio from the latent embeddings of the compression model.
Args:
            emb (torch.Tensor): Conditioning embeddings.
            size (torch.Size): Size of the output.
            step_size (int): Number of linearly spaced Markov chain steps.
"""
assert size[0] == emb.size(0)
out = torch.zeros(size).to(self.device)
for diffusion_process in self.diffusion_processes:
out += diffusion_process.generate(condition=emb, step_size=step_size, initial_noise=torch.randn_like(out))
return out
def re_eq(self, wav: torch.Tensor, ref: torch.Tensor, n_bands: int = 32, strictness: float = 1) -> torch.Tensor:
"""Match the eq to the encodec output by matching the standard deviation of some frequency bands.
Args:
wav (torch.Tensor): Audio to equalize.
ref (torch.Tensor): Reference audio from which we match the spectrogram.
n_bands (int): Number of bands of the eq.
strictness (float): How strict the matching. 0 is no matching, 1 is exact matching.
"""
split = julius.SplitBands(n_bands=n_bands, sample_rate=self.codec_model.sample_rate).to(wav.device)
bands = split(wav)
bands_ref = split(ref)
out = torch.zeros_like(ref)
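        # Rescale each band so its standard deviation matches the reference band's,
        # softened by `strictness`.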
for i in range(n_bands):
out += bands[i] * (bands_ref[i].std() / bands[i].std()) ** strictness
return out
@torch.no_grad()
def wav_to_tokens(self,
wav: torch.Tensor,
sample_rate: int,
cpu_offload: bool = True,
chunk_length: Optional[int] = None,
stride: Optional[int] = None,
concat_strategy: str = "first") -> torch.Tensor:
"""Get audio tokens from waveform in batch. Note that Encodec generates 75 tokens per second of audio at 24 kHz
meaning 320 samples (13.333 msec) per tokens.
Args:
wav (torch.Tensor): The audio that we want to extract the conditioning from (batch, channel, wav).
sample_rate (int): Sample rate of the audio.
cpu_offload (bool): Move the output tokens to cpu on the fly to save cuda memory.
chunk_length (int): Chunk length to split a long audio (sample size, must be divisible by sample_per_token).
stride (int): Stride over chunked audio (sample size, must be divisible by sample_per_token).
concat_strategy (str): "first" or "last" to indicate which chunk to use when consolidating the overlap.
"""
# sanity check
if wav.ndim != 3:
raise ValueError(f"wav should be (batch, channel, time): {wav.ndim} dims")
original_device = wav.device
# sampling audio
if sample_rate != self.sample_rate:
wav = julius.resample_frac(wav, sample_rate, self.sample_rate)
batch_size, channels, input_length = wav.shape
if channels > 1:
            logging.warning("Audio has more than one channel, but the encoder uses only the first channel.")
# validate chunk length and stride (if None, do one-shot process)
if chunk_length:
if chunk_length % self.sample_per_token != 0:
raise ValueError(f"chunk_length must be divisible by {self.sample_per_token}: {chunk_length}")
else:
chunk_length = input_length
chunk_length_latent = ceil(chunk_length / self.sample_per_token)
if stride:
if stride % self.sample_per_token != 0:
raise ValueError(f"stride must be divisible by {self.sample_per_token}: {stride}")
else:
stride = chunk_length
stride_latent = ceil(stride / self.sample_per_token)
# initialize the token tensor
num_tokens = ceil(input_length / self.sample_per_token)
num_filters = self.codec_model.model.config.num_filters
if self.num_codebooks_encoder is not None:
if self.num_codebooks_encoder > num_filters:
raise ValueError(f"num_codebooks_encoder must be smaller than {num_filters}")
num_filters = self.num_codebooks_encoder
tokens = torch.zeros(
(batch_size, num_filters, num_tokens),
device="cpu" if cpu_offload else original_device,
dtype=torch.int64
)
# tokenize by chunk in a sequential manner
for offset in tqdm(list(range(0, input_length - chunk_length + stride, stride))):
frame = wav[:, :1, offset: offset + chunk_length]
tmp_tokens, _ = self.codec_model.encode(frame.to(self.device))
offset_latent = int(offset / self.sample_per_token)
tmp_tokens = tmp_tokens.to("cpu") if cpu_offload else tmp_tokens.to(original_device)
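            # "last" overwrites the overlap with tokens from the newest chunk; "first" keeps the
            # earlier chunk and only writes the non-overlapping tail.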
if concat_strategy == "last" or offset == 0:
tokens[:, :, offset_latent: offset_latent + chunk_length_latent] = tmp_tokens[:, :num_filters, :]
else:
overlap_token = chunk_length_latent - stride_latent
tokens[:, :, offset_latent + overlap_token: offset_latent + chunk_length_latent] \
= tmp_tokens[:, :num_filters, overlap_token:]
return tokens
@torch.no_grad()
def tokens_to_wav(self,
tokens: torch.Tensor,
n_bands: int = 32,
step_size: int = 5,
cpu_offload: bool = True,
chunk_length: Optional[int] = None,
stride: Optional[int] = None,
concat_strategy: str = "crossfade",
skip_enhancer: bool = False) -> Tuple[torch.Tensor, float]:
"""Generate waveform audio with diffusion from the discrete codes in batch.
Args:
tokens (torch.Tensor): Discrete codes (batch, num_code, length).
n_bands (int): Bands for the eq matching.
step_size (int): Number of the linearly spaced Markov chain steps.
chunk_length (int): Chunk length to split a long audio.
stride (int): Stride over chunked audio.
            cpu_offload (bool): Move the output waveform to CPU on the fly to save CUDA memory.
            concat_strategy (str): "first", "last", or "crossfade" to indicate how to consolidate the overlap between chunks.
skip_enhancer (bool): Skip applying the enhancer.
"""
batch_size, num_filters, input_length = tokens.shape
        if num_filters < self.num_codebooks_decoder:
            raise ValueError(f"num_codebooks_decoder must not exceed the number of codebooks in tokens: {num_filters}")
original_device = tokens.device
# validate chunk length and stride (if None, do one-shot process)
chunk_length = chunk_length if chunk_length else input_length
chunk_length_wav = self.sample_per_token * chunk_length
stride = stride if stride else chunk_length
stride_wav = stride * self.sample_per_token
# initialize wav tensor
wav = torch.zeros(
(batch_size, 1, input_length * self.sample_per_token),
device="cpu" if cpu_offload else original_device,
dtype=torch.float32
)
# detokenize by chunk in a sequential manner
for offset in tqdm(list(range(0, input_length - chunk_length + stride, stride))):
            tmp_tokens = tokens[:, :self.num_codebooks_decoder, offset: offset + chunk_length].to(self.device)
wav_encodec = self.codec_model.decode(tmp_tokens)
condition = self.codec_model.decode_latent(tmp_tokens)
wav_diffusion = self.generate(emb=condition, size=wav_encodec.size(), step_size=step_size)
tmp_wav = self.re_eq(wav=wav_diffusion, ref=wav_encodec, n_bands=n_bands)
            tmp_wav = tmp_wav.to("cpu") if cpu_offload else tmp_wav.to(original_device)
offset_wav = offset * self.sample_per_token
overlap_wav = chunk_length_wav - stride_wav
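            # "last" overwrites the overlap with the newest chunk, "crossfade" linearly blends the
            # previous tail with the new chunk, and "first" keeps the earlier chunk and writes only
            # the non-overlapping tail.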
if concat_strategy == "last" or offset == 0:
wav[:, :, offset_wav: offset_wav + chunk_length_wav] = tmp_wav
elif concat_strategy == "crossfade":
fade_out = torch.linspace(1, 0, overlap_wav).unsqueeze(0).to(wav.device)
fade_in = torch.linspace(0, 1, overlap_wav).unsqueeze(0).to(wav.device)
tmp_wav[:, :, :overlap_wav] = (tmp_wav[:, :, :overlap_wav] * fade_in +
wav[:, :, offset_wav: offset_wav + overlap_wav] * fade_out)
wav[:, :, offset_wav: offset_wav + chunk_length_wav] = tmp_wav
else:
wav[:, :, offset_wav + overlap_wav: offset_wav + chunk_length_wav] = tmp_wav[:, :, overlap_wav:]
if skip_enhancer:
return wav, self.sample_rate
return self.enhancer.enhance_audio(wav, self.sample_rate), self.enhancer.sample_rate
class MultiBandDiffusion:
@staticmethod
def from_pretrained(num_codebooks_decoder: int = 3,
num_codebooks_encoder: Optional[int] = None,
mbd_model_alias: str = "mbd_comp_8.pt",
mbd_model_repo: str = "facebook/multiband-diffusion") -> BaseMultiBandDiffusion:
"""Get the pretrained Models for MultiBandDiffusion.
Args:
num_codebooks_decoder (int): Number of codebook to use for decoder.
num_codebooks_encoder (int): Number of codebook to use for encoder (default full code).
mbd_model_alias (str): Name of the MBD model weight.
see here https://huggingface.co/facebook/multiband-diffusion/tree/main
mbd_model_repo (str): Name of the MBD model repository.
"""
device = 'cuda' if torch.cuda.is_available() else 'cpu'
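        # The '//pretrained/...' reference lets audiocraft resolve the released EnCodec 24 kHz
        # checkpoint by name rather than from a local path.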
codec_model = CompressionSolver.model_from_checkpoint(
'//pretrained/facebook/encodec_24khz', device=device
)
codec_model = codec_model.to(device)
models, processors, cfgs = load_diffusion_models(mbd_model_repo, filename=mbd_model_alias, device=device)
diffusion_processes = []
for i in range(len(models)):
schedule = NoiseSchedule(**cfgs[i].schedule, sample_processor=processors[i], device=device)
diffusion_processes.append(DiffusionProcess(model=models[i], noise_schedule=schedule))
return BaseMultiBandDiffusion(
diffusion_processes=diffusion_processes,
codec_model=codec_model,
num_codebooks_decoder=num_codebooks_decoder,
num_codebooks_encoder=num_codebooks_encoder
)
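

if __name__ == "__main__":
    # Illustrative usage sketch (not part of the upstream audiocraft code): it assumes a mono
    # recording at "sample.wav" and that torchaudio is installed; adjust paths, chunking, and
    # concat strategies to your own setup.
    import torchaudio

    mbd = MultiBandDiffusion.from_pretrained(num_codebooks_decoder=3)
    wav, sr = torchaudio.load("sample.wav")             # (channel, time)
    wav = wav.unsqueeze(0)                               # (batch, channel, time)
    tokens = mbd.wav_to_tokens(wav, sample_rate=sr)      # (batch, num_code, length)
    resynth, out_sr = mbd.tokens_to_wav(tokens, skip_enhancer=True)
    torchaudio.save("resynth.wav", resynth[0], int(out_sr))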