|
"""https://github.com/facebookresearch/audiocraft/blob/main/audiocraft/models/multibanddiffusion.py""" |
|
import logging |
|
from typing import Optional, List |
|
from math import ceil |
|
import torch |
|
import julius |
|
|
|
from tqdm import tqdm |
|
from audiocraft.models.encodec import CompressionModel |
|
from audiocraft.solvers.compression import CompressionSolver |
|
|
|
|
|
class BaseEncodecTokenizer:
    """Convert waveforms into discrete Encodec tokens, optionally in overlapping chunks.

    Adapted from audiocraft's MultiBandDiffusion wrapper. Long audio can be encoded
    chunk by chunk (with optional overlap) to bound GPU memory usage.
    """

    def __init__(self,
                 codec_model: CompressionModel,
                 sample_per_token: int = 320,
                 num_codebooks_encoder: Optional[int] = None) -> None:
        """Base class for multi-band diffusion.

        Args:
            codec_model (CompressionModel): Underlying compression model used to obtain discrete tokens.
            sample_per_token (int): Number of samples per token (320 for 24kHz encodec).
            num_codebooks_encoder (int): Number of codebooks to use for the encoder (default: all).
        """
        self.codec_model = codec_model
        # Cache the codec's device once so waveform chunks can be moved onto it before encoding.
        self.device = next(self.codec_model.parameters()).device
        self.sample_per_token = sample_per_token
        self.num_codebooks_encoder = num_codebooks_encoder

    @property
    def sample_rate(self) -> int:
        """Sample rate (Hz) expected by the underlying codec model."""
        return self.codec_model.sample_rate

    @torch.no_grad()
    def wav_to_tokens(self,
                      wav: torch.Tensor,
                      sample_rate: int,
                      cpu_offload: bool = True,
                      chunk_length: Optional[int] = None,
                      stride: Optional[int] = None,
                      concat_strategy: str = "first") -> torch.Tensor:
        """Get audio tokens from waveform in batch. Note that Encodec generates 75 tokens per second of audio at 24 kHz
        meaning 320 samples (13.333 msec) per tokens.

        Args:
            wav (torch.Tensor): The audio that we want to extract the conditioning from (batch, channel, wav).
            sample_rate (int): Sample rate of the audio.
            cpu_offload (bool): Move the output tokens to cpu on the fly to save cuda memory.
            chunk_length (int): Chunk length to split a long audio (sample size, must be divisible by sample_per_token).
            stride (int): Stride over chunked audio (sample size, must be divisible by sample_per_token).
            concat_strategy (str): "first" or "last" to indicate which chunk to use when consolidating the overlap.

        Returns:
            torch.Tensor: Integer tokens of shape (batch, num_codebooks, num_tokens),
                on CPU when ``cpu_offload`` else on the input's original device.

        Raises:
            ValueError: If ``wav`` is not 3-dim, ``chunk_length``/``stride`` are not
                divisible by ``sample_per_token``, ``concat_strategy`` is unknown, or
                ``num_codebooks_encoder`` exceeds the codec's codebook count.
        """
        if wav.ndim != 3:
            raise ValueError(f"wav should be (batch, channel, time): {wav.ndim} dims")
        # Fail fast: an unknown strategy previously fell through and silently behaved like "first".
        if concat_strategy not in ("first", "last"):
            raise ValueError(f"concat_strategy must be 'first' or 'last': {concat_strategy}")
        original_device = wav.device

        if sample_rate != self.sample_rate:
            wav = julius.resample_frac(wav, sample_rate, self.sample_rate)
        batch_size, channels, input_length = wav.shape
        if channels > 1:
            logging.warning("Audio has more than one channel but encoder takes the first channel only.")

        if chunk_length:
            if chunk_length % self.sample_per_token != 0:
                raise ValueError(f"chunk_length must be divisible by {self.sample_per_token}: {chunk_length}")
        else:
            # No chunking requested: encode the whole input in one pass.
            chunk_length = input_length
        chunk_length_latent = ceil(chunk_length / self.sample_per_token)
        if stride:
            if stride % self.sample_per_token != 0:
                raise ValueError(f"stride must be divisible by {self.sample_per_token}: {stride}")
        else:
            # Default to non-overlapping chunks.
            stride = chunk_length
        stride_latent = ceil(stride / self.sample_per_token)

        num_tokens = ceil(input_length / self.sample_per_token)
        # NOTE(review): the codec config's `num_filters` is used as the codebook count here —
        # presumably the two coincide for this checkpoint; confirm against the codec model.
        num_filters = self.codec_model.model.config.num_filters
        if self.num_codebooks_encoder is not None:
            if self.num_codebooks_encoder > num_filters:
                raise ValueError(f"num_codebooks_encoder must be at most {num_filters}")
            num_filters = self.num_codebooks_encoder
        tokens = torch.zeros(
            (batch_size, num_filters, num_tokens),
            device="cpu" if cpu_offload else original_device,
            dtype=torch.int64
        )

        # `max(..., 0)` guarantees at least one iteration when the audio is shorter than
        # chunk_length; the previous bound (input_length - chunk_length + stride) could be
        # non-positive in that case, skipping the loop and silently returning all zeros.
        for offset in tqdm(range(0, max(input_length - chunk_length, 0) + stride, stride)):
            # Encoder takes the first channel only; slicing clamps the final (shorter) chunk.
            frame = wav[:, :1, offset: offset + chunk_length]
            tmp_tokens, _ = self.codec_model.encode(frame.to(self.device))
            offset_latent = offset // self.sample_per_token
            tmp_tokens = tmp_tokens.to("cpu") if cpu_offload else tmp_tokens.to(original_device)
            if concat_strategy == "last" or offset == 0:
                # Overwrite the whole window: later chunks win on the overlap region.
                tokens[:, :, offset_latent: offset_latent + chunk_length_latent] = tmp_tokens[:, :num_filters, :]
            else:
                # "first": keep the earlier chunk's tokens on the overlap, write only the new tail.
                overlap_token = chunk_length_latent - stride_latent
                tokens[:, :, offset_latent + overlap_token: offset_latent + chunk_length_latent] \
                    = tmp_tokens[:, :num_filters, overlap_token:]
        return tokens
|
|
|
|
|
class EncodecTokenizer:
    """Factory for :class:`BaseEncodecTokenizer` backed by the pretrained 24 kHz Encodec."""

    @staticmethod
    def from_pretrained(num_codebooks_encoder: Optional[int] = None,
                        device: Optional[str] = None) -> BaseEncodecTokenizer:
        """Get the pretrained Models for MultiBandDiffusion.

        Args:
            num_codebooks_encoder (int): Number of codebooks to use for the encoder (default: all).
            device (str): Torch device to load the codec on; defaults to "cuda" when
                available, else "cpu" (previous behavior, now overridable).

        Returns:
            BaseEncodecTokenizer: Tokenizer wrapping the pretrained codec.
        """
        if device is None:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        # Loads (and downloads, if needed) the pretrained Encodec checkpoint via audiocraft.
        codec_model = CompressionSolver.model_from_checkpoint(
            '//pretrained/facebook/encodec_24khz', device=device
        )
        codec_model = codec_model.to(device)
        return BaseEncodecTokenizer(
            codec_model=codec_model,
            num_codebooks_encoder=num_codebooks_encoder
        )
|
|