# -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train Tacotron2."""
import tensorflow as tf
physical_devices = tf.config.list_physical_devices("GPU")
for device in physical_devices:
    tf.config.experimental.set_memory_growth(device, True)
import sys
sys.path.append(".")
import argparse
import logging
import os
import numpy as np
import yaml
from tqdm import tqdm
import tensorflow_tts
from examples.tacotron2.tacotron_dataset import CharactorMelDataset
from tensorflow_tts.configs.tacotron2 import Tacotron2Config
from tensorflow_tts.models import TFTacotron2
from tensorflow_tts.optimizers import AdamWeightDecay, WarmUp
from tensorflow_tts.trainers import Seq2SeqBasedTrainer
from tensorflow_tts.utils import calculate_2d_loss, calculate_3d_loss, return_strategy
class Tacotron2Trainer(Seq2SeqBasedTrainer):
"""Tacotron2 Trainer class based on Seq2SeqBasedTrainer."""
def __init__(
self,
config,
strategy,
steps=0,
epochs=0,
is_mixed_precision=False,
):
"""Initialize trainer.
Args:
steps (int): Initial global steps.
epochs (int): Initial global epochs.
config (dict): Config dict loaded from yaml format configuration file.
is_mixed_precision (bool): Use mixed precision or not.
"""
super(Tacotron2Trainer, self).__init__(
steps=steps,
epochs=epochs,
config=config,
strategy=strategy,
is_mixed_precision=is_mixed_precision,
)
        # define metrics to aggregate data and log them with tf.summary.
self.list_metrics_name = [
"stop_token_loss",
"mel_loss_before",
"mel_loss_after",
"guided_attention_loss",
]
self.init_train_eval_metrics(self.list_metrics_name)
self.reset_states_train()
self.reset_states_eval()
self.config = config
def compile(self, model, optimizer):
super().compile(model, optimizer)
self.binary_crossentropy = tf.keras.losses.BinaryCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
self.mse = tf.keras.losses.MeanSquaredError(
reduction=tf.keras.losses.Reduction.NONE
)
self.mae = tf.keras.losses.MeanAbsoluteError(
reduction=tf.keras.losses.Reduction.NONE
)
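        # Reduction.NONE keeps the unreduced, per-timestep loss values so the
        # calculate_2d_loss / calculate_3d_loss helpers can reduce them to a
        # per-example loss of shape [batch_size], as compute_per_example_losses requires.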
def _train_step(self, batch):
"""Here we re-define _train_step because apply input_signature make
the training progress slower on my experiment. Note that input_signature
is apply on based_trainer by default.
"""
if self._already_apply_input_signature is False:
self.one_step_forward = tf.function(
self._one_step_forward, experimental_relax_shapes=True
)
self.one_step_evaluate = tf.function(
self._one_step_evaluate, experimental_relax_shapes=True
)
self.one_step_predict = tf.function(
self._one_step_predict, experimental_relax_shapes=True
)
self._already_apply_input_signature = True
# run one_step_forward
self.one_step_forward(batch)
# update counts
self.steps += 1
self.tqdm.update(1)
self._check_train_finish()
def _one_step_evaluate_per_replica(self, batch):
"""One step evaluate per GPU
Tacotron-2 used teacher-forcing when training and evaluation.
So we need pass `training=True` for inference step.
"""
outputs = self._model(**batch, training=True)
_, dict_metrics_losses = self.compute_per_example_losses(batch, outputs)
self.update_eval_metrics(dict_metrics_losses)
def _one_step_predict_per_replica(self, batch):
"""One step predict per GPU
Tacotron-2 used teacher-forcing when training and evaluation.
So we need pass `training=True` for inference step.
"""
outputs = self._model(**batch, training=True)
return outputs
def compute_per_example_losses(self, batch, outputs):
"""Compute per example losses and return dict_metrics_losses
Note that all element of the loss MUST has a shape [batch_size] and
the keys of dict_metrics_losses MUST be in self.list_metrics_name.
Args:
batch: dictionary batch input return from dataloader
outputs: outputs of the model
Returns:
per_example_losses: per example losses for each GPU, shape [B]
dict_metrics_losses: dictionary loss.
"""
(
decoder_output,
post_mel_outputs,
stop_token_predictions,
alignment_historys,
) = outputs
mel_loss_before = calculate_3d_loss(
batch["mel_gts"], decoder_output, loss_fn=self.mae
)
mel_loss_after = calculate_3d_loss(
batch["mel_gts"], post_mel_outputs, loss_fn=self.mae
)
# calculate stop_loss
max_mel_length = (
tf.reduce_max(batch["mel_lengths"])
if self.config["use_fixed_shapes"] is False
else [self.config["max_mel_length"]]
)
stop_gts = tf.expand_dims(
tf.range(tf.reduce_max(max_mel_length), dtype=tf.int32), 0
) # [1, max_len]
stop_gts = tf.tile(
stop_gts, [tf.shape(batch["mel_lengths"])[0], 1]
) # [B, max_len]
stop_gts = tf.cast(
tf.math.greater_equal(stop_gts, tf.expand_dims(batch["mel_lengths"], 1)),
tf.float32,
)
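        # e.g. with mel_lengths = [2, 4] and max_len = 4, stop_gts becomes
        # [[0, 0, 1, 1], [0, 0, 0, 1]]: frames at or beyond the true length are labelled 1 (stop).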
stop_token_loss = calculate_2d_loss(
stop_gts, stop_token_predictions, loss_fn=self.binary_crossentropy
)
# calculate guided attention loss.
attention_masks = tf.cast(
tf.math.not_equal(batch["g_attentions"], -1.0), tf.float32
)
loss_att = tf.reduce_sum(
tf.abs(alignment_historys * batch["g_attentions"]) * attention_masks,
axis=[1, 2],
)
loss_att /= tf.reduce_sum(attention_masks, axis=[1, 2])
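        # loss_att is the mean penalised attention weight per example: positions where
        # g_attentions == -1.0 are padding and are masked out of both the sum and the normaliser.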
per_example_losses = (
stop_token_loss + mel_loss_before + mel_loss_after + loss_att
)
dict_metrics_losses = {
"stop_token_loss": stop_token_loss,
"mel_loss_before": mel_loss_before,
"mel_loss_after": mel_loss_after,
"guided_attention_loss": loss_att,
}
return per_example_losses, dict_metrics_losses
def generate_and_save_intermediate_result(self, batch):
"""Generate and save intermediate result."""
import matplotlib.pyplot as plt
        # predict with tf.function for faster execution.
outputs = self.one_step_predict(batch)
(
decoder_output,
mel_outputs,
stop_token_predictions,
alignment_historys,
) = outputs
mel_gts = batch["mel_gts"]
utt_ids = batch["utt_ids"]
        # convert to numpy arrays.
        # here we just take the sample from the first replica.
try:
mels_before = decoder_output.values[0].numpy()
mels_after = mel_outputs.values[0].numpy()
mel_gts = mel_gts.values[0].numpy()
alignment_historys = alignment_historys.values[0].numpy()
utt_ids = utt_ids.values[0].numpy()
except Exception:
mels_before = decoder_output.numpy()
mels_after = mel_outputs.numpy()
mel_gts = mel_gts.numpy()
alignment_historys = alignment_historys.numpy()
utt_ids = utt_ids.numpy()
# check directory
dirname = os.path.join(self.config["outdir"], f"predictions/{self.steps}steps")
if not os.path.exists(dirname):
os.makedirs(dirname)
for idx, (mel_gt, mel_before, mel_after, alignment_history) in enumerate(
zip(mel_gts, mels_before, mels_after, alignment_historys), 0
):
mel_gt = tf.reshape(mel_gt, (-1, 80)).numpy() # [length, 80]
mel_before = tf.reshape(mel_before, (-1, 80)).numpy() # [length, 80]
mel_after = tf.reshape(mel_after, (-1, 80)).numpy() # [length, 80]
# plot figure and save it
utt_id = utt_ids[idx]
figname = os.path.join(dirname, f"{utt_id}.png")
fig = plt.figure(figsize=(10, 8))
ax1 = fig.add_subplot(311)
ax2 = fig.add_subplot(312)
ax3 = fig.add_subplot(313)
im = ax1.imshow(np.rot90(mel_gt), aspect="auto", interpolation="none")
ax1.set_title("Target Mel-Spectrogram")
fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax1)
ax2.set_title(f"Predicted Mel-before-Spectrogram @ {self.steps} steps")
im = ax2.imshow(np.rot90(mel_before), aspect="auto", interpolation="none")
fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax2)
ax3.set_title(f"Predicted Mel-after-Spectrogram @ {self.steps} steps")
im = ax3.imshow(np.rot90(mel_after), aspect="auto", interpolation="none")
fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax3)
plt.tight_layout()
plt.savefig(figname)
plt.close()
# plot alignment
figname = os.path.join(dirname, f"{idx}_alignment.png")
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
ax.set_title(f"Alignment @ {self.steps} steps")
im = ax.imshow(
alignment_history, aspect="auto", origin="lower", interpolation="none"
)
fig.colorbar(im, ax=ax)
xlabel = "Decoder timestep"
plt.xlabel(xlabel)
plt.ylabel("Encoder timestep")
plt.tight_layout()
plt.savefig(figname)
plt.close()
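# Example invocation (the script name and paths below are illustrative placeholders):
#   python examples/tacotron2/train_tacotron2.py \
#     --train-dir ./dump/train/ --dev-dir ./dump/valid/ \
#     --outdir ./exp/train.tacotron2/ --config ./conf/tacotron2.yaml \
#     --use-norm 1 --mixed_precision 0 --resume ""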
def main():
"""Run training process."""
    parser = argparse.ArgumentParser(
        description="Train Tacotron2 (see detail in examples/tacotron2/train_tacotron2.py)"
    )
parser.add_argument(
"--train-dir",
default=None,
type=str,
help="directory including training data. ",
)
parser.add_argument(
"--dev-dir",
default=None,
type=str,
help="directory including development data. ",
)
parser.add_argument(
"--use-norm", default=1, type=int, help="usr norm-mels for train or raw."
)
parser.add_argument(
"--outdir", type=str, required=True, help="directory to save checkpoints."
)
parser.add_argument(
"--config", type=str, required=True, help="yaml format configuration file."
)
parser.add_argument(
"--resume",
default="",
type=str,
nargs="?",
help='checkpoint file path to resume training. (default="")',
)
parser.add_argument(
"--verbose",
type=int,
default=1,
help="logging level. higher is more logging. (default=1)",
)
parser.add_argument(
"--mixed_precision",
default=0,
type=int,
help="using mixed precision for generator or not.",
)
parser.add_argument(
"--pretrained",
default="",
type=str,
nargs="?",
help="pretrained weights .h5 file to load weights from. Auto-skips non-matching layers",
)
parser.add_argument(
"--use-fal",
default=0,
type=int,
help="Use forced alignment guided attention loss or regular",
)
args = parser.parse_args()
# return strategy
STRATEGY = return_strategy()
# set mixed precision config
if args.mixed_precision == 1:
tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})
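        # the auto_mixed_precision graph rewrite automatically casts eligible ops to float16.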
args.mixed_precision = bool(args.mixed_precision)
args.use_norm = bool(args.use_norm)
args.use_fal = bool(args.use_fal)
# set logger
if args.verbose > 1:
logging.basicConfig(
level=logging.DEBUG,
stream=sys.stdout,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
elif args.verbose > 0:
logging.basicConfig(
level=logging.INFO,
stream=sys.stdout,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
else:
logging.basicConfig(
level=logging.WARN,
stream=sys.stdout,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
logging.warning("Skip DEBUG/INFO messages")
# check directory existence
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
# check arguments
if args.train_dir is None:
raise ValueError("Please specify --train-dir")
if args.dev_dir is None:
raise ValueError("Please specify --valid-dir")
# load and save config
with open(args.config) as f:
config = yaml.load(f, Loader=yaml.Loader)
config.update(vars(args))
config["version"] = tensorflow_tts.__version__
# get dataset
if config["remove_short_samples"]:
mel_length_threshold = config["mel_length_threshold"]
else:
mel_length_threshold = 0
if config["format"] == "npy":
charactor_query = "*-ids.npy"
mel_query = "*-raw-feats.npy" if args.use_norm is False else "*-norm-feats.npy"
align_query = "*-alignment.npy" if args.use_fal is True else ""
charactor_load_fn = np.load
mel_load_fn = np.load
else:
raise ValueError("Only npy are supported.")
train_dataset = CharactorMelDataset(
dataset=config["tacotron2_params"]["dataset"],
root_dir=args.train_dir,
charactor_query=charactor_query,
mel_query=mel_query,
charactor_load_fn=charactor_load_fn,
mel_load_fn=mel_load_fn,
mel_length_threshold=mel_length_threshold,
reduction_factor=config["tacotron2_params"]["reduction_factor"],
use_fixed_shapes=config["use_fixed_shapes"],
align_query=align_query,
)
    # write max_mel_length and max_char_length back into the config
config.update({"max_mel_length": int(train_dataset.max_mel_length)})
config.update({"max_char_length": int(train_dataset.max_char_length)})
with open(os.path.join(args.outdir, "config.yml"), "w") as f:
yaml.dump(config, f, Dumper=yaml.Dumper)
for key, value in config.items():
logging.info(f"{key} = {value}")
train_dataset = train_dataset.create(
is_shuffle=config["is_shuffle"],
allow_cache=config["allow_cache"],
batch_size=config["batch_size"]
* STRATEGY.num_replicas_in_sync
* config["gradient_accumulation_steps"],
)
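    # the dataset is batched with batch_size * num_replicas * gradient_accumulation_steps
    # examples per step, e.g. 32 * 2 GPUs * 2 accumulation steps = 128 (illustrative numbers).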
valid_dataset = CharactorMelDataset(
dataset=config["tacotron2_params"]["dataset"],
root_dir=args.dev_dir,
charactor_query=charactor_query,
mel_query=mel_query,
charactor_load_fn=charactor_load_fn,
mel_load_fn=mel_load_fn,
mel_length_threshold=mel_length_threshold,
reduction_factor=config["tacotron2_params"]["reduction_factor"],
        use_fixed_shapes=False,  # no need to apply fixed shapes for evaluation.
align_query=align_query,
).create(
is_shuffle=config["is_shuffle"],
allow_cache=config["allow_cache"],
batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync,
)
# define trainer
trainer = Tacotron2Trainer(
config=config,
strategy=STRATEGY,
steps=0,
epochs=0,
is_mixed_precision=args.mixed_precision,
)
with STRATEGY.scope():
# define model.
tacotron_config = Tacotron2Config(**config["tacotron2_params"])
tacotron2 = TFTacotron2(config=tacotron_config, name="tacotron2")
tacotron2._build()
tacotron2.summary()
if len(args.pretrained) > 1:
tacotron2.load_weights(args.pretrained, by_name=True, skip_mismatch=True)
logging.info(
f"Successfully loaded pretrained weight from {args.pretrained}."
)
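        # by_name=True with skip_mismatch=True restores only layers whose names and weight
        # shapes match the .h5 file; everything else keeps its fresh initialization.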
# AdamW for tacotron2
learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=config["optimizer_params"]["initial_learning_rate"],
decay_steps=config["optimizer_params"]["decay_steps"],
end_learning_rate=config["optimizer_params"]["end_learning_rate"],
)
learning_rate_fn = WarmUp(
initial_learning_rate=config["optimizer_params"]["initial_learning_rate"],
decay_schedule_fn=learning_rate_fn,
warmup_steps=int(
config["train_max_steps"]
* config["optimizer_params"]["warmup_proportion"]
),
)
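        # warmup_steps = train_max_steps * warmup_proportion, e.g. 200000 * 0.02 = 4000
        # warmup steps (illustrative values) before the polynomial decay above takes over.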
optimizer = AdamWeightDecay(
learning_rate=learning_rate_fn,
weight_decay_rate=config["optimizer_params"]["weight_decay"],
beta_1=0.9,
beta_2=0.98,
epsilon=1e-6,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
)
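        # weight decay is decoupled (AdamW-style) and deliberately skipped for LayerNorm
        # parameters and biases. Accessing optimizer.iterations below presumably forces the
        # optimizer's step counter to be created inside the strategy scope before compiling.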
_ = optimizer.iterations
# compile trainer
trainer.compile(model=tacotron2, optimizer=optimizer)
# start training
try:
trainer.fit(
train_dataset,
valid_dataset,
saved_path=os.path.join(config["outdir"], "checkpoints/"),
resume=args.resume,
)
except KeyboardInterrupt:
trainer.save_checkpoint()
logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")
if __name__ == "__main__":
main()