import tensorflow as tf
import config as cfg
from var_cnn_util import get_mask_3d


class BaseTTSModel(object):
    """Feed-forward duration and acoustic models for parametric TTS.

    Builds two independent MLP sub-networks — ``d_model`` (phone-duration
    regression) and ``a_model`` (acoustic-feature regression) — together
    with length-masked MSE losses and per-network Adam training ops whose
    learning rates decay exponentially with the number of training steps.

    Placeholders (shapes are (batch, max_seq_len, dim) unless noted):
        x_d_ph / y_d_ph: duration-model inputs / targets.
        x_a_ph / y_a_ph: acoustic-model inputs / targets.
        lu_d_ph / lu_a_ph: (batch,) int32 true sequence lengths, used to
            mask padded frames out of the losses.
    """

    def __init__(self):
        self.x_d_ph = tf.placeholder(tf.float32,
                                     [None, None, cfg.duration_linguistic_dim])
        self.y_d_ph = tf.placeholder(tf.float32, [None, None, cfg.duration_dim])
        self.lu_d_ph = tf.placeholder(tf.int32, [None])
        self.x_a_ph = tf.placeholder(tf.float32,
                                     [None, None, cfg.a_in])
        self.y_a_ph = tf.placeholder(tf.float32, [None, None, cfg.acoustic_dim])
        self.lu_a_ph = tf.placeholder(tf.int32, [None])

        # Kept for backward compatibility only: these used to be the decay
        # steps, but as constant Python ints they froze the learning rate.
        self.global_step = 1
        self.d_step = 1
        self.a_step = 1

        # Real TF step counters. minimize(global_step=...) increments them,
        # which is what makes the exponential learning-rate decay progress.
        self._d_step_var = tf.Variable(0, trainable=False, name='d_step')
        self._a_step_var = tf.Variable(0, trainable=False, name='a_step')

        self.d_loss = None
        self.a_loss = None
        self.d_train_op = None
        self.a_train_op = None

        self.build_graph()

    def _mlp(self, inputs, out_dim, scope_prefix, keep_prob):
        """Shared MLP stack: ReLU dense layers with dropout, tanh output.

        ``scope_prefix`` reproduces the historical per-layer scope names
        (e.g. ``d_model/d_model_0``) so existing checkpoints still load.
        """
        hidden = inputs
        for i in range(cfg.num_hidden_layers):
            with tf.variable_scope(scope_prefix + '_' + str(i),
                                   reuse=tf.AUTO_REUSE):
                hidden = tf.layers.dense(hidden, cfg.hidden_size,
                                         activation=tf.nn.relu)
                hidden = tf.nn.dropout(hidden, keep_prob=keep_prob)
        return tf.layers.dense(hidden, out_dim, activation=tf.nn.tanh)

    def duration_model(self, x_d, seq_len=None, keep_prob=1):
        """Predict duration features from linguistic features.

        ``seq_len`` is accepted for interface compatibility with recurrent
        subclasses but is unused by this feed-forward network.
        """
        with tf.variable_scope('d_model', reuse=tf.AUTO_REUSE):
            return self._mlp(x_d, cfg.duration_dim, 'd_model', keep_prob)

    def acoustic_model(self, x_a, seq_len=None, keep_prob=1):
        """Predict acoustic features from linguistic features.

        ``seq_len`` is accepted for interface compatibility with recurrent
        subclasses but is unused by this feed-forward network.
        """
        with tf.variable_scope('a_model', reuse=tf.AUTO_REUSE):
            return self._mlp(x_a, cfg.acoustic_dim, 'a_model', keep_prob)

    def calc_d_loss(self):
        """Masked MSE between duration targets and predictions."""
        y_d_pred = self.duration_model(self.x_d_ph, seq_len=self.lu_d_ph,
                                       keep_prob=cfg.keep_prob)
        # Zero out padded time steps so they do not contribute to the loss.
        mask = get_mask_3d(self.lu_d_ph, tf.shape(self.y_d_ph)[1],
                           self.y_d_ph.dtype)
        return tf.losses.mean_squared_error(self.y_d_ph, y_d_pred, mask)

    def calc_a_loss(self):
        """Masked MSE between acoustic targets and predictions."""
        y_a_pred = self.acoustic_model(self.x_a_ph, seq_len=self.lu_a_ph,
                                       keep_prob=cfg.keep_prob)
        # Exposed so inference code can fetch predictions directly.
        self.y_a_pred = y_a_pred
        mask = get_mask_3d(self.lu_a_ph, tf.shape(self.y_a_ph)[1],
                           self.y_a_ph.dtype)
        return tf.losses.mean_squared_error(self.y_a_ph, y_a_pred, mask)

    def get_d_train_op(self):
        """Adam + gradient clipping over the duration-model variables."""
        with tf.variable_scope('train_op_d', reuse=tf.AUTO_REUSE):
            d_vars = [v for v in tf.trainable_variables()
                      if 'd_model' in v.name]
            # BUG FIX: the decay step was previously the constant int
            # self.d_step, so the learning rate never actually decayed.
            lr = tf.train.exponential_decay(
                cfg.lr,
                self._d_step_var,
                decay_steps=200,
                decay_rate=0.5)
            optimizer = tf.train.AdamOptimizer(lr)
            optimizer = tf.contrib.estimator.clip_gradients_by_norm(
                optimizer, clip_norm=5.0)
            # global_step= makes minimize() advance the decay counter.
            return optimizer.minimize(self.d_loss, var_list=d_vars,
                                      global_step=self._d_step_var)

    def get_a_train_op(self):
        """Adam + gradient clipping over the acoustic-model variables."""
        with tf.variable_scope('train_op_a', reuse=tf.AUTO_REUSE):
            a_vars = [v for v in tf.trainable_variables()
                      if 'a_model' in v.name]
            # BUG FIX: the decay step was previously the constant int
            # self.a_step, so the learning rate never actually decayed.
            lr = tf.train.exponential_decay(
                cfg.lr,
                self._a_step_var,
                decay_steps=200,
                decay_rate=0.5)
            optimizer = tf.train.AdamOptimizer(lr)
            optimizer = tf.contrib.estimator.clip_gradients_by_norm(
                optimizer, clip_norm=5.0)
            # global_step= makes minimize() advance the decay counter.
            return optimizer.minimize(self.a_loss, var_list=a_vars,
                                      global_step=self._a_step_var)

    def build_graph(self):
        """Assemble losses and training ops for both sub-networks."""
        self.d_loss = self.calc_d_loss()
        self.a_loss = self.calc_a_loss()
        self.d_train_op = self.get_d_train_op()
        self.a_train_op = self.get_a_train_op()
