import base_model
import config as cfg
import conv_util as cu
import tensorflow as tf


class UFANSModel(base_model.BaseTTSModel):
    """U-net style acoustic model (UFANS) built with TF1 graph-mode ops.

    The network is a symmetric encoder/decoder: ``contraction_phase``
    downsamples ``cfg.u_net_layers`` times while stashing each pre-pooling
    feature map, and ``expansive_phase`` upsamples back, adding the stashed
    maps as skip connections (U-net).  All variable scopes use
    ``tf.AUTO_REUSE`` so the graph can be built more than once (e.g. for
    train and eval towers) while sharing weights.
    """

    def contraction_phase(self, x_a):
        """Run the downsampling half of the U-net.

        Args:
            x_a: Input tensor; a channel axis is appended via
                ``tf.expand_dims(x_a, 3)``, so it is expected to be rank 3
                (presumably (batch, time, features) — TODO confirm against
                callers).

        Returns:
            Tuple ``(x, conv_results)`` where ``x`` is the final pooled
            tensor and ``conv_results`` holds the pre-pooling output of each
            of the ``cfg.u_net_layers`` levels, for use as skip connections.
        """
        skip_connections = []
        # Add a trailing channel dimension for the 2-D conv/pool helpers.
        x = tf.expand_dims(x_a, 3)
        for level in range(cfg.u_net_layers):
            with tf.variable_scope('contraction_' + str(level),
                                   reuse=tf.AUTO_REUSE):
                conv_out = cu.conv_block(x)
                # Keep the pre-pooling activation so the expansive phase
                # can add it back at the matching resolution.
                skip_connections.append(conv_out)
                x = cu.avg_pooling2d(conv_out)
        return x, skip_connections

    def expansive_phase(self, x, conv_results, keep_prob):
        """Run the upsampling half of the U-net with skip connections.

        Args:
            x: Bottleneck tensor produced by ``contraction_phase``.
            conv_results: Per-level skip tensors from ``contraction_phase``
                (one entry per layer, index 0 = highest resolution).
            keep_prob: Dropout keep probability applied after each skip add.

        Returns:
            The final decoded tensor (the level-0 block widens to 32
            filters before handing off to the output projection).
        """
        # One deconv per contraction level, finest level (0) last.  An empty
        # range handles cfg.u_net_layers == 0 gracefully.
        for level in range(cfg.u_net_layers - 1, -1, -1):
            with tf.variable_scope('expansive_' + str(level),
                                   reuse=tf.AUTO_REUSE):
                x_up = cu.deconvolution(x)
                # Skip connection: add the matching contraction output.
                x_add = x_up + conv_results[level]
                x_drop = tf.nn.dropout(x_add, keep_prob=keep_prob)
                # The last (finest) level widens to 32 filters; the
                # intermediate levels use the helper's default.
                if level == 0:
                    x = cu.conv_block(x_drop, filters=32)
                else:
                    x = cu.conv_block(x_drop)
        return x

    def acoustic_model(self, x_a, seq_len=None, keep_prob=1):
        """Build the full U-net acoustic model graph.

        Args:
            x_a: Rank-3 input tensor (see ``contraction_phase``).
            seq_len: Accepted for interface compatibility but currently
                unused by this model.
            keep_prob: Dropout keep probability for the expansive phase
                (default 1 = no dropout, i.e. inference mode).

        Returns:
            A (batch, time, cfg.acoustic_dim) tensor of tanh-activated
            acoustic features.
        """
        with tf.variable_scope('ufans_model', reuse=tf.AUTO_REUSE):
            x, conv_results = self.contraction_phase(x_a)
            x = self.expansive_phase(x, conv_results, keep_prob)
            # Flatten the (freq, channel) axes into one feature axis; the
            # time axis stays dynamic (-1) while the feature size is static.
            x = tf.reshape(x, [tf.shape(x)[0], -1, x.shape[2] * x.shape[3]])
            output = tf.layers.dense(x, cfg.acoustic_dim,
                                     activation=tf.nn.tanh)
            return output

    def get_a_train_op(self):
        """Create the training op for the acoustic-model variables.

        Optimizes ``self.a_loss`` over only the variables under the
        ``ufans_model`` scope, with exponentially decaying Adam (halving
        the learning rate every 200 steps of ``self.a_step``) and
        per-variable gradient-norm clipping at 5.0.

        Returns:
            The ``minimize`` op.
        """
        with tf.variable_scope('train_op_ufans', reuse=tf.AUTO_REUSE):
            # Restrict the update to this model's variables so other
            # sub-models in the same graph are untouched.
            model_vars = [var for var in tf.trainable_variables()
                          if 'ufans_model' in var.name]
            lr = tf.train.exponential_decay(
                cfg.lr,
                self.a_step,
                decay_steps=200,
                decay_rate=0.5)
            optimizer = tf.train.AdamOptimizer(lr)
            # NOTE(review): clip_gradients_by_norm clips each gradient
            # individually (not the global norm).
            optimizer = tf.contrib.estimator.clip_gradients_by_norm(
                optimizer, clip_norm=5.0)
            return optimizer.minimize(self.a_loss, var_list=model_vars)
