"""https://github.com/facebookresearch/audiocraft/blob/main/audiocraft/models/multibanddiffusion.py"""
import logging
from typing import Optional
from math import ceil
import torch
import julius
from tqdm import tqdm
from audiocraft.models.encodec import CompressionModel
from audiocraft.solvers.compression import CompressionSolver


class BaseEncodecTokenizer:
def __init__(self,
codec_model: CompressionModel,
sample_per_token: int = 320,
num_codebooks_encoder: Optional[int] = None) -> None:
"""Base class for multi-band diffusion.
Args:
codec_model (CompressionModel): Underlying compression model used to obtain discrete tokens.
sample_per_token (int): Number of sample per token (320 for 24kHz encodec).
num_codebooks_encoder (int): Number of codebook to use for encoder (default full code).
"""
self.codec_model = codec_model
self.device = next(self.codec_model.parameters()).device
self.sample_per_token = sample_per_token
self.num_codebooks_encoder = num_codebooks_encoder

    @property
def sample_rate(self) -> int:
return self.codec_model.sample_rate

    @torch.no_grad()
def wav_to_tokens(self,
wav: torch.Tensor,
sample_rate: int,
cpu_offload: bool = True,
chunk_length: Optional[int] = None,
stride: Optional[int] = None,
concat_strategy: str = "first") -> torch.Tensor:
"""Get audio tokens from waveform in batch. Note that Encodec generates 75 tokens per second of audio at 24 kHz
meaning 320 samples (13.333 msec) per tokens.
Args:
wav (torch.Tensor): The audio that we want to extract the conditioning from (batch, channel, wav).
sample_rate (int): Sample rate of the audio.
cpu_offload (bool): Move the output tokens to cpu on the fly to save cuda memory.
chunk_length (int): Chunk length to split a long audio (sample size, must be divisible by sample_per_token).
stride (int): Stride over chunked audio (sample size, must be divisible by sample_per_token).
concat_strategy (str): "first" or "last" to indicate which chunk to use when consolidating the overlap.
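
        Example (illustrative sketch; the dummy input is an assumption):
            >>> tokenizer = EncodecTokenizer.from_pretrained()
            >>> wav = torch.zeros(1, 1, 24000)  # one second of silence at 24 kHz
            >>> tokens = tokenizer.wav_to_tokens(wav, sample_rate=24000)
            >>> tokens.shape[-1]  # 75 tokens for one second of audio
            75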
"""
        # sanity check
        if wav.ndim != 3:
            raise ValueError(f"wav should be (batch, channel, time): {wav.ndim} dims")
        if concat_strategy not in ("first", "last"):
            raise ValueError(f"concat_strategy must be 'first' or 'last': {concat_strategy}")
original_device = wav.device
# sampling audio
if sample_rate != self.sample_rate:
wav = julius.resample_frac(wav, sample_rate, self.sample_rate)
batch_size, channels, input_length = wav.shape
if channels > 1:
            logging.warning("Audio has more than one channel; only the first channel will be encoded.")
# validate chunk length and stride (if None, do one-shot process)
if chunk_length:
if chunk_length % self.sample_per_token != 0:
raise ValueError(f"chunk_length must be divisible by {self.sample_per_token}: {chunk_length}")
else:
chunk_length = input_length
chunk_length_latent = ceil(chunk_length / self.sample_per_token)
if stride:
if stride % self.sample_per_token != 0:
raise ValueError(f"stride must be divisible by {self.sample_per_token}: {stride}")
else:
stride = chunk_length
stride_latent = ceil(stride / self.sample_per_token)
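        # Worked example of the latent arithmetic (assuming the 24 kHz defaults): chunk_length=48000
        # and stride=24000 give chunk_length_latent=150 and stride_latent=75, i.e. consecutive
        # chunks overlap by 150 - 75 = 75 tokens.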
# initialize the token tensor
num_tokens = ceil(input_length / self.sample_per_token)
        # NOTE: `num_filters` from the underlying HF EnCodec config is used here as the codebook
        # count; the two values coincide for facebook/encodec_24khz.
        num_filters = self.codec_model.model.config.num_filters
        if self.num_codebooks_encoder is not None:
            if self.num_codebooks_encoder > num_filters:
                raise ValueError(f"num_codebooks_encoder must not exceed {num_filters}")
            num_filters = self.num_codebooks_encoder
tokens = torch.zeros(
(batch_size, num_filters, num_tokens),
device="cpu" if cpu_offload else original_device,
dtype=torch.int64
)
# tokenize by chunk in a sequential manner
for offset in tqdm(list(range(0, input_length - chunk_length + stride, stride))):
frame = wav[:, :1, offset: offset + chunk_length]
tmp_tokens, _ = self.codec_model.encode(frame.to(self.device))
            offset_latent = offset // self.sample_per_token
tmp_tokens = tmp_tokens.to("cpu") if cpu_offload else tmp_tokens.to(original_device)
if concat_strategy == "last" or offset == 0:
tokens[:, :, offset_latent: offset_latent + chunk_length_latent] = tmp_tokens[:, :num_filters, :]
else:
overlap_token = chunk_length_latent - stride_latent
tokens[:, :, offset_latent + overlap_token: offset_latent + chunk_length_latent] \
= tmp_tokens[:, :num_filters, overlap_token:]
return tokens


class EncodecTokenizer:
@staticmethod
def from_pretrained(num_codebooks_encoder: Optional[int] = None) -> BaseEncodecTokenizer:
"""Get the pretrained Models for MultiBandDiffusion.
Args:
num_codebooks_encoder (int): Number of codebook to use for encoder (default full code).
"""
device = 'cuda' if torch.cuda.is_available() else 'cpu'
codec_model = CompressionSolver.model_from_checkpoint(
'//pretrained/facebook/encodec_24khz', device=device
)
codec_model = codec_model.to(device)
return BaseEncodecTokenizer(
codec_model=codec_model,
num_codebooks_encoder=num_codebooks_encoder
)
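

if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original API): tokenize five seconds
    # of silence with chunked, overlapping encoding. The dummy input and chunking parameters
    # below are assumptions chosen to satisfy the divisibility checks in `wav_to_tokens`.
    tokenizer = EncodecTokenizer.from_pretrained()
    dummy_wav = torch.zeros(2, 1, 5 * 24000)  # (batch, channel, time): 5 seconds at 24 kHz
    tokens = tokenizer.wav_to_tokens(
        dummy_wav,
        sample_rate=24000,
        chunk_length=2 * 24000,  # 2-second chunks, divisible by sample_per_token=320
        stride=24000,            # 1-second hop, i.e. 1 second of overlap between chunks
        concat_strategy="first",
    )
    print(tokens.shape)  # torch.Size([2, num_codebooks, 375]) -- 75 tokens per second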