import os
import glob
import re
import sys
import argparse
import logging
import json
import subprocess
import warnings
import random
import functools

import librosa
import numpy as np
from scipy.io.wavfile import read
import torch
from torch.nn import functional as F
from modules.commons import sequence_mask

MATPLOTLIB_FLAG = False

logging.basicConfig(stream=sys.stdout, level=logging.WARN)
# module-level fallback; get_logger() replaces this with a per-model logger
logger = logging

f0_bin = 256
f0_max = 1100.0
f0_min = 50.0
f0_mel_min = 1127 * np.log(1 + f0_min / 700)
f0_mel_max = 1127 * np.log(1 + f0_max / 700)


def normalize_f0(f0, x_mask, uv, random_scale=True):
    # calculate means based on x_mask
    uv_sum = torch.sum(uv, dim=1, keepdim=True)
    uv_sum[uv_sum == 0] = 9999  # avoid division by zero for all-unvoiced rows
    means = torch.sum(f0[:, 0, :] * uv, dim=1, keepdim=True) / uv_sum

    if random_scale:
        factor = torch.Tensor(f0.shape[0], 1).uniform_(0.8, 1.2).to(f0.device)
    else:
        factor = torch.ones(f0.shape[0], 1).to(f0.device)

    # normalize f0 based on means and factor
    f0_norm = (f0 - means.unsqueeze(-1)) * factor.unsqueeze(-1)
    if torch.isnan(f0_norm).any():
        # bail out if normalization produced NaNs
        exit(0)
    return f0_norm * x_mask


def plot_data_to_numpy(x, y):
    global MATPLOTLIB_FLAG
    if not MATPLOTLIB_FLAG:
        import matplotlib
        matplotlib.use("Agg")
        MATPLOTLIB_FLAG = True
        mpl_logger = logging.getLogger('matplotlib')
        mpl_logger.setLevel(logging.WARNING)
    import matplotlib.pylab as plt

    fig, ax = plt.subplots(figsize=(10, 2))
    plt.plot(x)
    plt.plot(y)
    plt.tight_layout()

    fig.canvas.draw()
    # np.fromstring is deprecated for binary data; np.frombuffer is the replacement
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    plt.close()
    return data


def f0_to_coarse(f0):
    is_torch = isinstance(f0, torch.Tensor)
    f0_mel = 1127 * (1 + f0 / 700).log() if is_torch else 1127 * np.log(1 + f0 / 700)
    f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1

    f0_mel[f0_mel <= 1] = 1
    f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1
    # np.int was removed in recent NumPy; use the builtin int instead
    f0_coarse = (f0_mel + 0.5).int() if is_torch else np.rint(f0_mel).astype(int)
    assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (f0_coarse.max(), f0_coarse.min())
    return f0_coarse


def get_content(cmodel, y):
    with torch.no_grad():
        c = cmodel.extract_features(y.squeeze(1))[0]
    c = c.transpose(1, 2)
    return c


def get_f0_predictor(f0_predictor, hop_length, sampling_rate, **kargs):
    if f0_predictor == "pm":
        from modules.F0Predictor.PMF0Predictor import PMF0Predictor
        f0_predictor_object = PMF0Predictor(hop_length=hop_length, sampling_rate=sampling_rate)
    elif f0_predictor == "crepe":
        from modules.F0Predictor.CrepeF0Predictor import CrepeF0Predictor
        f0_predictor_object = CrepeF0Predictor(hop_length=hop_length, sampling_rate=sampling_rate,
                                               device=kargs["device"], threshold=kargs["threshold"])
    elif f0_predictor == "harvest":
        from modules.F0Predictor.HarvestF0Predictor import HarvestF0Predictor
        f0_predictor_object = HarvestF0Predictor(hop_length=hop_length, sampling_rate=sampling_rate)
    elif f0_predictor == "dio":
        from modules.F0Predictor.DioF0Predictor import DioF0Predictor
        f0_predictor_object = DioF0Predictor(hop_length=hop_length, sampling_rate=sampling_rate)
    else:
        raise Exception("Unknown f0 predictor")
    return f0_predictor_object


def get_speech_encoder(speech_encoder, device=None, **kargs):
    if speech_encoder == "vec768l12":
        from vencoder.ContentVec768L12 import ContentVec768L12
        speech_encoder_object = ContentVec768L12(device=device)
    elif speech_encoder == "vec256l9":
        from vencoder.ContentVec256L9 import ContentVec256L9
        speech_encoder_object = ContentVec256L9(device=device)
    elif speech_encoder == "vec256l9-onnx":
        from vencoder.ContentVec256L9_Onnx import ContentVec256L9_Onnx
        speech_encoder_object = ContentVec256L9_Onnx(device=device)
    elif speech_encoder == "vec256l12-onnx":
        from vencoder.ContentVec256L12_Onnx import ContentVec256L12_Onnx
        speech_encoder_object = ContentVec256L12_Onnx(device=device)
    elif speech_encoder == "vec768l9-onnx":
        from vencoder.ContentVec768L9_Onnx import ContentVec768L9_Onnx
        speech_encoder_object = ContentVec768L9_Onnx(device=device)
    elif speech_encoder == "vec768l12-onnx":
        from vencoder.ContentVec768L12_Onnx import ContentVec768L12_Onnx
        speech_encoder_object = ContentVec768L12_Onnx(device=device)
    elif speech_encoder == "hubertsoft-onnx":
        from vencoder.HubertSoft_Onnx import HubertSoft_Onnx
        speech_encoder_object = HubertSoft_Onnx(device=device)
    elif speech_encoder == "hubertsoft":
        from vencoder.HubertSoft import HubertSoft
        speech_encoder_object = HubertSoft(device=device)
    elif speech_encoder == "whisper-ppg":
        from vencoder.WhisperPPG import WhisperPPG
        speech_encoder_object = WhisperPPG(device=device)
    else:
        raise Exception("Unknown speech encoder")
    return speech_encoder_object
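
# Usage sketch for the two factories above (illustrative only; hop_length and
# sampling_rate below are the repo's usual 44.1 kHz defaults, not values
# mandated by these functions):
#
#   encoder = get_speech_encoder("vec768l12", device="cuda")
#   f0_pred = get_f0_predictor("dio", hop_length=512, sampling_rate=44100)
#
# The "crepe" predictor additionally expects `device` and `threshold`
# keyword arguments, which are forwarded through **kargs.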
def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False):
    assert os.path.isfile(checkpoint_path)
    checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
    iteration = checkpoint_dict['iteration']
    learning_rate = checkpoint_dict['learning_rate']
    if optimizer is not None and not skip_optimizer and checkpoint_dict['optimizer'] is not None:
        optimizer.load_state_dict(checkpoint_dict['optimizer'])
    saved_state_dict = checkpoint_dict['model']
    if hasattr(model, 'module'):
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    new_state_dict = {}
    for k, v in state_dict.items():
        try:
            new_state_dict[k] = saved_state_dict[k]
            assert saved_state_dict[k].shape == v.shape, (saved_state_dict[k].shape, v.shape)
        except Exception:
            # keep the freshly initialized weight when the key is missing
            # from the checkpoint or its shape does not match
            logger.info("%s is not in the checkpoint" % k)
            new_state_dict[k] = v
    if hasattr(model, 'module'):
        model.module.load_state_dict(new_state_dict)
    else:
        model.load_state_dict(new_state_dict)
    logger.info("Loaded checkpoint '{}' (iteration {})".format(
        checkpoint_path, iteration))
    return model, optimizer, learning_rate, iteration


def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
    logger.info("Saving model and optimizer state at iteration {} to {}".format(
        iteration, checkpoint_path))
    if hasattr(model, 'module'):
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    torch.save({'model': state_dict,
                'iteration': iteration,
                'optimizer': optimizer.state_dict(),
                'learning_rate': learning_rate}, checkpoint_path)


def clean_checkpoints(path_to_models='logs/44k/', n_ckpts_to_keep=2, sort_by_time=True):
    """Free up space by deleting saved checkpoints.

    Arguments:
    path_to_models   -- Path to the model directory
    n_ckpts_to_keep  -- Number of ckpts to keep, excluding G_0.pth and D_0.pth
    sort_by_time     -- True -> chronologically delete ckpts
                        False -> lexicographically delete ckpts
    """
    ckpts_files = [f for f in os.listdir(path_to_models)
                   if os.path.isfile(os.path.join(path_to_models, f))]
    name_key = (lambda _f: int(re.compile(r'._(\d+)\.pth').match(_f).group(1)))
    time_key = (lambda _f: os.path.getmtime(os.path.join(path_to_models, _f)))
    sort_key = time_key if sort_by_time else name_key
    x_sorted = lambda _x: sorted([f for f in ckpts_files
                                  if f.startswith(_x) and not f.endswith('_0.pth')],
                                 key=sort_key)
    to_del = [os.path.join(path_to_models, fn) for fn in
              (x_sorted('G')[:-n_ckpts_to_keep] + x_sorted('D')[:-n_ckpts_to_keep])]
    del_info = lambda fn: logger.info(f".. Free up space by deleting ckpt {fn}")
    del_routine = lambda x: [os.remove(x), del_info(x)]
    rs = [del_routine(fn) for fn in to_del]  # trigger deletions
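
# Checkpoint round-trip sketch (the paths are hypothetical; the G_<step>.pth
# naming follows the latest_checkpoint_path() default below):
#
#   save_checkpoint(model, optim, learning_rate=1e-4, iteration=1000,
#                   checkpoint_path="logs/44k/G_1000.pth")
#   model, optim, lr, it = load_checkpoint("logs/44k/G_1000.pth", model, optim)
#   clean_checkpoints("logs/44k/", n_ckpts_to_keep=2)  # keep 2 newest G_/D_ each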
def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={},
              audio_sampling_rate=22050):
    for k, v in scalars.items():
        writer.add_scalar(k, v, global_step)
    for k, v in histograms.items():
        writer.add_histogram(k, v, global_step)
    for k, v in images.items():
        writer.add_image(k, v, global_step, dataformats='HWC')
    for k, v in audios.items():
        writer.add_audio(k, v, global_step, audio_sampling_rate)


def latest_checkpoint_path(dir_path, regex="G_*.pth"):
    f_list = glob.glob(os.path.join(dir_path, regex))
    f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
    x = f_list[-1]
    print(x)
    return x


def plot_spectrogram_to_numpy(spectrogram):
    global MATPLOTLIB_FLAG
    if not MATPLOTLIB_FLAG:
        import matplotlib
        matplotlib.use("Agg")
        MATPLOTLIB_FLAG = True
        mpl_logger = logging.getLogger('matplotlib')
        mpl_logger.setLevel(logging.WARNING)
    import matplotlib.pylab as plt

    fig, ax = plt.subplots(figsize=(10, 2))
    im = ax.imshow(spectrogram, aspect="auto", origin="lower",
                   interpolation='none')
    plt.colorbar(im, ax=ax)
    plt.xlabel("Frames")
    plt.ylabel("Channels")
    plt.tight_layout()

    fig.canvas.draw()
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    plt.close()
    return data


def plot_alignment_to_numpy(alignment, info=None):
    global MATPLOTLIB_FLAG
    if not MATPLOTLIB_FLAG:
        import matplotlib
        matplotlib.use("Agg")
        MATPLOTLIB_FLAG = True
        mpl_logger = logging.getLogger('matplotlib')
        mpl_logger.setLevel(logging.WARNING)
    import matplotlib.pylab as plt

    fig, ax = plt.subplots(figsize=(6, 4))
    im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
                   interpolation='none')
    fig.colorbar(im, ax=ax)
    xlabel = 'Decoder timestep'
    if info is not None:
        xlabel += '\n\n' + info
    plt.xlabel(xlabel)
    plt.ylabel('Encoder timestep')
    plt.tight_layout()

    fig.canvas.draw()
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    plt.close()
    return data


def load_wav_to_torch(full_path):
    sampling_rate, data = read(full_path)
    return torch.FloatTensor(data.astype(np.float32)), sampling_rate


def load_filepaths_and_text(filename, split="|"):
    with open(filename, encoding='utf-8') as f:
        filepaths_and_text = [line.strip().split(split) for line in f]
    return filepaths_and_text


def get_hparams(init=True):
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', type=str, default="./configs/config.json",
                        help='JSON file for configuration')
    parser.add_argument('-m', '--model', type=str, required=True,
                        help='Model name')

    args = parser.parse_args()
    model_dir = os.path.join("./logs", args.model)

    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    config_path = args.config
    config_save_path = os.path.join(model_dir, "config.json")
    if init:
        with open(config_path, "r") as f:
            data = f.read()
        with open(config_save_path, "w") as f:
            f.write(data)
    else:
        with open(config_save_path, "r") as f:
            data = f.read()
    config = json.loads(data)

    hparams = HParams(**config)
    hparams.model_dir = model_dir
    return hparams


def get_hparams_from_dir(model_dir):
    config_save_path = os.path.join(model_dir, "config.json")
    with open(config_save_path, "r") as f:
        data = f.read()
    config = json.loads(data)

    hparams = HParams(**config)
    hparams.model_dir = model_dir
    return hparams
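
# HParams access sketch: nested dicts from the JSON config become nested
# HParams objects (see the class below), so values are reachable by attribute
# or by key. The "train" / "learning_rate" names are examples from a typical
# so-vits-svc config, not guaranteed for every config:
#
#   hps = get_hparams_from_dir("./logs/my_model")
#   hps.train.learning_rate == hps["train"]["learning_rate"]  # True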
def get_hparams_from_file(config_path):
    with open(config_path, "r") as f:
        data = f.read()
    config = json.loads(data)

    hparams = HParams(**config)
    return hparams


def check_git_hash(model_dir):
    source_dir = os.path.dirname(os.path.realpath(__file__))
    if not os.path.exists(os.path.join(source_dir, ".git")):
        logger.warning("{} is not a git repository, therefore hash value comparison will be ignored.".format(
            source_dir))
        return

    cur_hash = subprocess.getoutput("git rev-parse HEAD")

    path = os.path.join(model_dir, "githash")
    if os.path.exists(path):
        with open(path) as f:
            saved_hash = f.read()
        if saved_hash != cur_hash:
            logger.warning("git hash values are different. {}(saved) != {}(current)".format(
                saved_hash[:8], cur_hash[:8]))
    else:
        with open(path, "w") as f:
            f.write(cur_hash)


def get_logger(model_dir, filename="train.log"):
    global logger
    logger = logging.getLogger(os.path.basename(model_dir))
    logger.setLevel(logging.DEBUG)

    formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    h = logging.FileHandler(os.path.join(model_dir, filename))
    h.setLevel(logging.DEBUG)
    h.setFormatter(formatter)
    logger.addHandler(h)
    return logger


def repeat_expand_2d(content, target_len):
    # content : [h, t]
    src_len = content.shape[-1]
    target = torch.zeros([content.shape[0], target_len], dtype=torch.float).to(content.device)
    temp = torch.arange(src_len + 1) * target_len / src_len
    current_pos = 0
    for i in range(target_len):
        if i < temp[current_pos + 1]:
            target[:, i] = content[:, current_pos]
        else:
            current_pos += 1
            target[:, i] = content[:, current_pos]
    return target


def mix_model(model_paths, mix_rate, mode):
    mix_rate = torch.FloatTensor(mix_rate) / 100
    model_tem = torch.load(model_paths[0])
    models = [torch.load(path)["model"] for path in model_paths]
    if mode == 0:
        mix_rate = F.softmax(mix_rate, dim=0)
    for k in model_tem["model"].keys():
        model_tem["model"][k] = torch.zeros_like(model_tem["model"][k])
        for i, model in enumerate(models):
            model_tem["model"][k] += model[k] * mix_rate[i]
    torch.save(model_tem, os.path.join(os.path.curdir, "output.pth"))
    return os.path.join(os.path.curdir, "output.pth")


class HParams():
    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            if isinstance(v, dict):
                v = HParams(**v)
            self[k] = v

    def keys(self):
        return self.__dict__.keys()

    def items(self):
        return self.__dict__.items()

    def values(self):
        return self.__dict__.values()

    def __len__(self):
        return len(self.__dict__)

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        return setattr(self, key, value)

    def __contains__(self, key):
        return key in self.__dict__

    def __repr__(self):
        return self.__dict__.__repr__()

    def get(self, index):
        return self.__dict__.get(index)


class Volume_Extractor:
    def __init__(self, hop_size=512):
        self.hop_size = hop_size

    def extract(self, audio):  # audio: 2d tensor array
        if not isinstance(audio, torch.Tensor):
            audio = torch.Tensor(audio)
        n_frames = int(audio.size(-1) // self.hop_size)
        audio2 = audio ** 2
        audio2 = torch.nn.functional.pad(audio2, (int(self.hop_size // 2), int((self.hop_size + 1) // 2)),
                                         mode='reflect')
        volume = torch.FloatTensor(
            [torch.mean(audio2[:, int(n * self.hop_size): int((n + 1) * self.hop_size)])
             for n in range(n_frames)])
        volume = torch.sqrt(volume)
        return volume
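
# Volume_Extractor sketch: with hop_size=512, one second of 44.1 kHz audio
# yields floor(44100 / 512) = 86 RMS frames:
#
#   extractor = Volume_Extractor(hop_size=512)
#   audio = torch.randn(1, 44100)      # dummy batch of one mono waveform
#   volume = extractor.extract(audio)  # FloatTensor of shape [86]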