import glob
import json
import logging
import os
import re
import subprocess
import sys

import librosa
import numpy as np
import torch
from scipy.io.wavfile import read
from torch.nn import functional as F

MATPLOTLIB_FLAG = False

logging.basicConfig(stream=sys.stdout, level=logging.WARN)
# module-level fallback: route through the logging module directly until
# get_logger() installs a real per-run logger
logger = logging

f0_bin = 256
f0_max = 1100.0
f0_min = 50.0
f0_mel_min = 1127 * np.log(1 + f0_min / 700)
f0_mel_max = 1127 * np.log(1 + f0_max / 700)


def normalize_f0(f0, x_mask, uv, random_scale=True):
    # calculate means based on x_mask
    uv_sum = torch.sum(uv, dim=1, keepdim=True)
    uv_sum[uv_sum == 0] = 9999
    means = torch.sum(f0[:, 0, :] * uv, dim=1, keepdim=True) / uv_sum

    if random_scale:
        factor = torch.Tensor(f0.shape[0], 1).uniform_(0.8, 1.2).to(f0.device)
    else:
        factor = torch.ones(f0.shape[0], 1).to(f0.device)
    # normalize f0 based on means and factor
    f0_norm = (f0 - means.unsqueeze(-1)) * factor.unsqueeze(-1)
    if torch.isnan(f0_norm).any():
        exit(0)
    return f0_norm * x_mask


def plot_data_to_numpy(x, y):
    global MATPLOTLIB_FLAG
    if not MATPLOTLIB_FLAG:
        import matplotlib
        matplotlib.use("Agg")
        MATPLOTLIB_FLAG = True
        mpl_logger = logging.getLogger('matplotlib')
        mpl_logger.setLevel(logging.WARNING)
    import matplotlib.pylab as plt

    fig, ax = plt.subplots(figsize=(10, 2))
    plt.plot(x)
    plt.plot(y)
    plt.tight_layout()

    fig.canvas.draw()
    # np.fromstring is deprecated for binary data; frombuffer reads the
    # rendered canvas bytes directly
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    plt.close()
    return data


def interpolate_f0(f0):
    '''
    Interpolate the F0 contour across unvoiced frames.
    Returns the interpolated contour and a voiced/unvoiced (V/UV) flag vector.
    '''
    data = np.reshape(f0, (f0.size, 1))

    vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
    vuv_vector[data > 0.0] = 1.0
    vuv_vector[data <= 0.0] = 0.0

    ip_data = data

    frame_number = data.size
    last_value = 0.0
    for i in range(frame_number):
        if data[i] <= 0.0:
            # find the next voiced frame j, then fill the gap [i, j)
            j = i + 1
            for j in range(i + 1, frame_number):
                if data[j] > 0.0:
                    break
            if j < frame_number - 1:
                if last_value > 0.0:
                    # voiced on both sides: linear interpolation
                    step = (data[j] - data[i - 1]) / float(j - i)
                    for k in range(i, j):
                        ip_data[k] = data[i - 1] + step * (k - i + 1)
                else:
                    # leading unvoiced run: back-fill from the next voiced value
                    for k in range(i, j):
                        ip_data[k] = data[j]
            else:
                # trailing unvoiced run: hold the last voiced value
                for k in range(i, frame_number):
                    ip_data[k] = last_value
        else:
            ip_data[i] = data[i]
            last_value = data[i]

    return ip_data[:, 0], vuv_vector[:, 0]


def compute_f0_parselmouth(wav_numpy, p_len=None, sampling_rate=44100, hop_length=512):
    import parselmouth
    x = wav_numpy
    if p_len is None:
        p_len = x.shape[0] // hop_length
    else:
        assert abs(p_len - x.shape[0] // hop_length) < 4, "pad length error"
    time_step = hop_length / sampling_rate * 1000
    f0_min = 50
    f0_max = 1100
    f0 = parselmouth.Sound(x, sampling_rate).to_pitch_ac(
        time_step=time_step / 1000, voicing_threshold=0.6,
        pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency']

    # pad symmetrically so the contour has exactly p_len frames
    pad_size = (p_len - len(f0) + 1) // 2
    if pad_size > 0 or p_len - len(f0) - pad_size > 0:
        f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode='constant')
    return f0


def resize_f0(x, target_len):
    # stretch/compress an F0 contour to target_len frames; near-zero
    # (unvoiced) values become NaN so they do not skew the interpolation
    source = np.array(x)
    source[source < 0.001] = np.nan
    target = np.interp(np.arange(0, len(source) * target_len, len(source)) / target_len,
                       np.arange(0, len(source)), source)
    res = np.nan_to_num(target)
    return res


def compute_f0_dio(wav_numpy, p_len=None, sampling_rate=44100, hop_length=512):
    import pyworld
    if p_len is None:
        p_len = wav_numpy.shape[0] // hop_length
    f0, t = pyworld.dio(
        wav_numpy.astype(np.double),
        fs=sampling_rate,
        f0_ceil=800,
        frame_period=1000 * hop_length / sampling_rate,
    )
    # refine the raw DIO estimate with StoneMask
    f0 = pyworld.stonemask(wav_numpy.astype(np.double), f0, t, sampling_rate)
    for index, pitch in enumerate(f0):
        f0[index] = round(pitch, 1)
    return resize_f0(f0, p_len)
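
# Usage sketch (not part of the original module): extract a DIO F0 contour
# from a wav file and interpolate across unvoiced frames. The path
# "example.wav" and the helper name _example_f0_pipeline are placeholders
# for illustration only.
def _example_f0_pipeline(wav_path="example.wav", hop_length=512):
    wav, sr = librosa.load(wav_path, sr=44100)
    f0 = compute_f0_dio(wav, sampling_rate=sr, hop_length=hop_length)
    # fill unvoiced (zero) frames by interpolation; vuv flags voiced frames as 1.0
    f0_interp, vuv = interpolate_f0(f0)
    return f0_interp, vuv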
def f0_to_coarse(f0):
    is_torch = isinstance(f0, torch.Tensor)
    # map F0 (Hz) onto the mel scale, then quantize into f0_bin - 1 coarse buckets
    f0_mel = 1127 * (1 + f0 / 700).log() if is_torch else 1127 * np.log(1 + f0 / 700)
    f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1

    f0_mel[f0_mel <= 1] = 1
    f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1
    # np.int was removed in recent NumPy releases; use the builtin int instead
    f0_coarse = (f0_mel + 0.5).int() if is_torch else np.rint(f0_mel).astype(int)
    assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (f0_coarse.max(), f0_coarse.min())
    return f0_coarse


def get_hubert_model():
    vec_path = "hubert/checkpoint_best_legacy_500.pt"
    print("load model(s) from {}".format(vec_path))
    from fairseq import checkpoint_utils
    models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
        [vec_path],
        suffix="",
    )
    model = models[0]
    model.eval()
    return model


def get_hubert_content(hmodel, wav_16k_tensor):
    feats = wav_16k_tensor
    if feats.dim() == 2:  # stereo: average the two channels
        feats = feats.mean(-1)
    assert feats.dim() == 1, feats.dim()
    feats = feats.view(1, -1)
    padding_mask = torch.BoolTensor(feats.shape).fill_(False)
    inputs = {
        "source": feats.to(wav_16k_tensor.device),
        "padding_mask": padding_mask.to(wav_16k_tensor.device),
        "output_layer": 9,  # layer 9
    }
    with torch.no_grad():
        logits = hmodel.extract_features(**inputs)
        feats = hmodel.final_proj(logits[0])
    return feats.transpose(1, 2)


def get_content(cmodel, y):
    with torch.no_grad():
        c = cmodel.extract_features(y.squeeze(1))[0]
    c = c.transpose(1, 2)
    return c


def get_f0_predictor(f0_predictor, hop_length, sampling_rate, **kargs):
    if f0_predictor == "pm":
        from modules.F0Predictor.PMF0Predictor import PMF0Predictor
        f0_predictor_object = PMF0Predictor(hop_length=hop_length, sampling_rate=sampling_rate)
    elif f0_predictor == "crepe":
        from modules.F0Predictor.CrepeF0Predictor import CrepeF0Predictor
        f0_predictor_object = CrepeF0Predictor(hop_length=hop_length, sampling_rate=sampling_rate,
                                               device=kargs["device"], threshold=kargs["threshold"])
    elif f0_predictor == "harvest":
        from modules.F0Predictor.HarvestF0Predictor import HarvestF0Predictor
        f0_predictor_object = HarvestF0Predictor(hop_length=hop_length, sampling_rate=sampling_rate)
    elif f0_predictor == "dio":
        from modules.F0Predictor.DioF0Predictor import DioF0Predictor
        f0_predictor_object = DioF0Predictor(hop_length=hop_length, sampling_rate=sampling_rate)
    else:
        raise Exception("Unknown f0 predictor")
    return f0_predictor_object


def get_speech_encoder(speech_encoder, device=None, **kargs):
    if speech_encoder == "vec768l12":
        from vencoder.ContentVec768L12 import ContentVec768L12
        speech_encoder_object = ContentVec768L12(device=device)
    elif speech_encoder == "vec256l9":
        from vencoder.ContentVec256L9 import ContentVec256L9
        speech_encoder_object = ContentVec256L9(device=device)
    elif speech_encoder == "vec256l9-onnx":
        from vencoder.ContentVec256L9_Onnx import ContentVec256L9_Onnx
        speech_encoder_object = ContentVec256L9_Onnx(device=device)
    elif speech_encoder == "vec256l12-onnx":
        from vencoder.ContentVec256L12_Onnx import ContentVec256L12_Onnx
        speech_encoder_object = ContentVec256L12_Onnx(device=device)
    elif speech_encoder == "vec768l9-onnx":
        from vencoder.ContentVec768L9_Onnx import ContentVec768L9_Onnx
        speech_encoder_object = ContentVec768L9_Onnx(device=device)
    elif speech_encoder == "vec768l12-onnx":
        from vencoder.ContentVec768L12_Onnx import ContentVec768L12_Onnx
        speech_encoder_object = ContentVec768L12_Onnx(device=device)
    elif speech_encoder == "hubertsoft-onnx":
        from vencoder.HubertSoft_Onnx import HubertSoft_Onnx
        speech_encoder_object = HubertSoft_Onnx(device=device)
    elif speech_encoder == "hubertsoft":
        from vencoder.HubertSoft import HubertSoft
        speech_encoder_object = HubertSoft(device=device)
    elif speech_encoder == "whisper-ppg":
        from vencoder.WhisperPPG import WhisperPPG
        speech_encoder_object = WhisperPPG(device=device)
    elif speech_encoder == "cnhubertlarge":
        from vencoder.CNHubertLarge import CNHubertLarge
        speech_encoder_object = CNHubertLarge(device=device)
    elif speech_encoder == "dphubert":
        from vencoder.DPHubert import DPHubert
        speech_encoder_object = DPHubert(device=device)
    elif speech_encoder == "whisper-ppg-large":
        from vencoder.WhisperPPGLarge import WhisperPPGLarge
        speech_encoder_object = WhisperPPGLarge(device=device)
    else:
        raise Exception("Unknown speech encoder")
    return speech_encoder_object
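
# Usage sketch (not part of the original module): build a speech encoder and
# an F0 predictor from the factories above. Assumes the repo-local vencoder/
# and modules/ packages plus their checkpoints are present; the encoder and
# predictor names are illustrative picks from the branches above.
def _example_build_frontend(device="cpu"):
    encoder = get_speech_encoder("vec768l12", device=device)
    f0_predictor = get_f0_predictor("dio", hop_length=512, sampling_rate=44100)
    return encoder, f0_predictor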
def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False):
    assert os.path.isfile(checkpoint_path)
    checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
    iteration = checkpoint_dict['iteration']
    learning_rate = checkpoint_dict['learning_rate']
    if optimizer is not None and not skip_optimizer and checkpoint_dict['optimizer'] is not None:
        optimizer.load_state_dict(checkpoint_dict['optimizer'])
    saved_state_dict = checkpoint_dict['model']
    if hasattr(model, 'module'):
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    new_state_dict = {}
    for k, v in state_dict.items():
        try:
            # copy matching parameters; shapes must agree
            new_state_dict[k] = saved_state_dict[k]
            assert saved_state_dict[k].shape == v.shape, (saved_state_dict[k].shape, v.shape)
        except (KeyError, AssertionError):
            # missing or mismatched keys keep the model's current weights
            print("error, %s is not in the checkpoint" % k)
            logger.info("%s is not in the checkpoint" % k)
            new_state_dict[k] = v
    if hasattr(model, 'module'):
        model.module.load_state_dict(new_state_dict)
    else:
        model.load_state_dict(new_state_dict)
    logger.info("Loaded checkpoint '{}' (iteration {})".format(
        checkpoint_path, iteration))
    return model, optimizer, learning_rate, iteration


def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
    logger.info("Saving model and optimizer state at iteration {} to {}".format(
        iteration, checkpoint_path))
    if hasattr(model, 'module'):
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    torch.save({'model': state_dict,
                'iteration': iteration,
                'optimizer': optimizer.state_dict(),
                'learning_rate': learning_rate}, checkpoint_path)


def clean_checkpoints(path_to_models='logs/44k/', n_ckpts_to_keep=2, sort_by_time=True):
    """Free up space by deleting old saved checkpoints.

    Arguments:
    path_to_models  -- Path to the model directory
    n_ckpts_to_keep -- Number of ckpts to keep, excluding G_0.pth and D_0.pth
    sort_by_time    -- True -> chronologically delete ckpts
                       False -> lexicographically delete ckpts
    """
    ckpts_files = [f for f in os.listdir(path_to_models)
                   if os.path.isfile(os.path.join(path_to_models, f))]
    name_key = lambda _f: int(re.compile(r'._(\d+)\.pth').match(_f).group(1))
    time_key = lambda _f: os.path.getmtime(os.path.join(path_to_models, _f))
    sort_key = time_key if sort_by_time else name_key
    x_sorted = lambda _x: sorted(
        [f for f in ckpts_files if f.startswith(_x) and not f.endswith('_0.pth')],
        key=sort_key)
    to_del = [os.path.join(path_to_models, fn)
              for fn in (x_sorted('G')[:-n_ckpts_to_keep] + x_sorted('D')[:-n_ckpts_to_keep])]
    del_info = lambda fn: logger.info(f".. Free up space by deleting ckpt {fn}")
    del_routine = lambda x: [os.remove(x), del_info(x)]
    rs = [del_routine(fn) for fn in to_del]
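
# Usage sketch (not part of the original module): a typical save/prune cycle
# during training. `model`, `optim`, and the paths are placeholders; the G_
# prefix follows the G_*.pth naming that clean_checkpoints expects.
def _example_checkpoint_cycle(model, optim, lr, step, model_dir="logs/44k/"):
    save_checkpoint(model, optim, lr, step, os.path.join(model_dir, f"G_{step}.pth"))
    # keep only the two most recent G_/D_ checkpoints (plus the *_0.pth seeds)
    clean_checkpoints(path_to_models=model_dir, n_ckpts_to_keep=2, sort_by_time=True)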
def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={},
              audio_sampling_rate=22050):
    for k, v in scalars.items():
        writer.add_scalar(k, v, global_step)
    for k, v in histograms.items():
        writer.add_histogram(k, v, global_step)
    for k, v in images.items():
        writer.add_image(k, v, global_step, dataformats='HWC')
    for k, v in audios.items():
        writer.add_audio(k, v, global_step, audio_sampling_rate)


def latest_checkpoint_path(dir_path, regex="G_*.pth"):
    f_list = glob.glob(os.path.join(dir_path, regex))
    # sort numerically by the digits in the filename, so G_10000 > G_9000
    f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
    x = f_list[-1]
    print(x)
    return x


def plot_spectrogram_to_numpy(spectrogram):
    global MATPLOTLIB_FLAG
    if not MATPLOTLIB_FLAG:
        import matplotlib
        matplotlib.use("Agg")
        MATPLOTLIB_FLAG = True
        mpl_logger = logging.getLogger('matplotlib')
        mpl_logger.setLevel(logging.WARNING)
    import matplotlib.pylab as plt

    fig, ax = plt.subplots(figsize=(10, 2))
    im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation='none')
    plt.colorbar(im, ax=ax)
    plt.xlabel("Frames")
    plt.ylabel("Channels")
    plt.tight_layout()

    fig.canvas.draw()
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    plt.close()
    return data


def plot_alignment_to_numpy(alignment, info=None):
    global MATPLOTLIB_FLAG
    if not MATPLOTLIB_FLAG:
        import matplotlib
        matplotlib.use("Agg")
        MATPLOTLIB_FLAG = True
        mpl_logger = logging.getLogger('matplotlib')
        mpl_logger.setLevel(logging.WARNING)
    import matplotlib.pylab as plt

    fig, ax = plt.subplots(figsize=(6, 4))
    im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', interpolation='none')
    fig.colorbar(im, ax=ax)
    xlabel = 'Decoder timestep'
    if info is not None:
        xlabel += '\n\n' + info
    plt.xlabel(xlabel)
    plt.ylabel('Encoder timestep')
    plt.tight_layout()

    fig.canvas.draw()
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    plt.close()
    return data


def load_wav_to_torch(full_path):
    sampling_rate, data = read(full_path)
    return torch.FloatTensor(data.astype(np.float32)), sampling_rate


def load_filepaths_and_text(filename, split="|"):
    with open(filename, encoding='utf-8') as f:
        filepaths_and_text = [line.strip().split(split) for line in f]
    return filepaths_and_text


def get_hparams_from_file(config_path):
    with open(config_path, "r") as f:
        data = f.read()
    config = json.loads(data)
    hparams = HParams(**config)
    return hparams


def check_git_hash(model_dir):
    source_dir = os.path.dirname(os.path.realpath(__file__))
    if not os.path.exists(os.path.join(source_dir, ".git")):
        logger.warning("{} is not a git repository, therefore hash value comparison will be ignored.".format(
            source_dir))
        return

    cur_hash = subprocess.getoutput("git rev-parse HEAD")

    path = os.path.join(model_dir, "githash")
    if os.path.exists(path):
        saved_hash = open(path).read()
        if saved_hash != cur_hash:
            logger.warning("git hash values are different. {}(saved) != {}(current)".format(
                saved_hash[:8], cur_hash[:8]))
    else:
        open(path, "w").write(cur_hash)
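
# Usage sketch (not part of the original module): load hyperparameters from a
# JSON config and resume from the newest generator checkpoint. The config path
# is a placeholder; HParams is defined at the bottom of this module and is
# resolved at call time.
def _example_resume(model, optim, model_dir="logs/44k/", config_path="configs/config.json"):
    hps = get_hparams_from_file(config_path)
    ckpt = latest_checkpoint_path(model_dir, "G_*.pth")
    model, optim, lr, it = load_checkpoint(ckpt, model, optim)
    return hps, model, optim, lr, it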
def get_logger(model_dir, filename="train.log"):
    global logger
    logger = logging.getLogger(os.path.basename(model_dir))
    logger.setLevel(logging.DEBUG)

    formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    h = logging.FileHandler(os.path.join(model_dir, filename))
    h.setLevel(logging.DEBUG)
    h.setFormatter(formatter)
    logger.addHandler(h)
    return logger


def repeat_expand_2d(content, target_len):
    # content : [h, t] -> [h, target_len], repeating each source frame
    # proportionally so the feature matrix covers target_len frames
    src_len = content.shape[-1]
    target = torch.zeros([content.shape[0], target_len], dtype=torch.float).to(content.device)
    temp = torch.arange(src_len + 1) * target_len / src_len
    current_pos = 0
    for i in range(target_len):
        if i < temp[current_pos + 1]:
            target[:, i] = content[:, current_pos]
        else:
            current_pos += 1
            target[:, i] = content[:, current_pos]
    return target


def change_rms(data1, sr1, data2, sr2, rate):
    # data1 is the input audio, data2 is the output audio, and rate is
    # data2's weight in the mix (adapted from RVC)
    rms1 = librosa.feature.rms(
        y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2
    )  # one RMS point every half second
    rms2 = librosa.feature.rms(y=data2.detach().cpu().numpy(),
                               frame_length=sr2 // 2 * 2, hop_length=sr2 // 2)
    rms1 = torch.from_numpy(rms1).to(data2.device)
    rms1 = F.interpolate(rms1.unsqueeze(0), size=data2.shape[0], mode="linear").squeeze()
    rms2 = torch.from_numpy(rms2).to(data2.device)
    rms2 = F.interpolate(rms2.unsqueeze(0), size=data2.shape[0], mode="linear").squeeze()
    # floor the output RMS to avoid division blow-ups in the power term below
    rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6)
    data2 *= (
        torch.pow(rms1, torch.tensor(1 - rate))
        * torch.pow(rms2, torch.tensor(rate - 1))
    )
    return data2


class HParams():
    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            if isinstance(v, dict):
                v = HParams(**v)
            self[k] = v

    def keys(self):
        return self.__dict__.keys()

    def items(self):
        return self.__dict__.items()

    def values(self):
        return self.__dict__.values()

    def __len__(self):
        return len(self.__dict__)

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        return setattr(self, key, value)

    def __contains__(self, key):
        return key in self.__dict__

    def __repr__(self):
        return self.__dict__.__repr__()

    def get(self, index):
        return self.__dict__.get(index)


class Volume_Extractor:
    def __init__(self, hop_size=512):
        self.hop_size = hop_size

    def extract(self, audio):  # audio: 2d tensor array
        if not isinstance(audio, torch.Tensor):
            audio = torch.Tensor(audio)
        n_frames = int(audio.size(-1) // self.hop_size)
        audio2 = audio ** 2
        # reflect-pad half a hop on each side so every frame is centered
        audio2 = torch.nn.functional.pad(
            audio2, (int(self.hop_size // 2), int((self.hop_size + 1) // 2)), mode='reflect')
        volume = torch.FloatTensor(
            [torch.mean(audio2[:, int(n * self.hop_size): int((n + 1) * self.hop_size)])
             for n in range(n_frames)])
        volume = torch.sqrt(volume)
        return volume
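
# Usage sketch (not part of the original module): compute a frame-level volume
# envelope, then stretch a feature matrix to the same number of frames with
# repeat_expand_2d. Shapes below are arbitrary placeholders for illustration.
def _example_volume_and_expand():
    audio = torch.randn(1, 44100)                             # 1 s of audio, batch of 1
    volume = Volume_Extractor(hop_size=512).extract(audio)    # [n_frames]
    content = torch.randn(256, 50)                            # [h, t] feature matrix
    expanded = repeat_expand_2d(content, volume.shape[0])     # [h, n_frames]
    return volume, expanded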