import tensorflow as tf

class modules(object):
    """Reusable TF1 graph-building blocks: embedding lookup, a Kim-style
    text CNN, bidirectional LSTM/GRU encoders, and a state-reduction layer
    for seeding a unidirectional decoder from a bidirectional encoder.

    Hyper-parameters are fixed in ``__init__``; the layer methods add ops to
    the default graph. ``add_ops``/``embedding_layer`` also stash tensors on
    the instance (``ids_placeholder``, ``embedding_weights``, ``embedding``).
    """

    def __init__(self):
        self.seq_len = 20          # fixed sequence length assumed by cnn_layer's pooling window
        self.vocab_size = 10       # embedding table rows
        self.embedding_size = 100  # embedding width; also the conv filter width
        self.initializer = tf.random_normal_initializer(stddev=0.02)
        self.trunc_norm_init = tf.truncated_normal_initializer(stddev=0.02)
        self.is_training = True    # Python-level flag: switches batch-norm mode and dropout

    def add_ops(self):
        """Create the int32 ``[batch, time]`` token-id placeholder."""
        self.ids_placeholder = tf.placeholder(tf.int32, shape=(None, None), name='ids_placeholder')

    def embedding_layer(self):
        """Look up embeddings for ``self.ids_placeholder`` on the CPU.

        Produces ``self.embedding`` of shape ``[batch, time, embedding_size]``.
        """
        with tf.device('/cpu:0'), tf.name_scope('embedding'):
            # NOTE: tf.name_scope does not affect tf.get_variable names — the
            # variable lives in the surrounding variable scope.
            self.embedding_weights = tf.get_variable(
                                        'embedding_weights',
                                        [self.vocab_size, self.embedding_size],
                                        dtype=tf.float32
                                        )
            self.embedding = tf.nn.embedding_lookup(self.embedding_weights, self.ids_placeholder)

    def cnn_layer(self, input):
        """Multi-width convolution + max-pooling text encoder.

        :param input: ``[batch, seq_len, embedding_size]`` (a trailing
            channel dim of 1 is added when missing; ``seq_len`` must equal
            ``self.seq_len`` so each pooling window spans the full conv output).
        :return: ``[batch, num_filters * len(filter_sizes)]`` features after a
            tanh dense projection.
        """
        # conv2d needs rank-4 [batch, height, width, channels]; the typical
        # embedding output is rank 3, so append a channels dim of 1.
        # (Bug fix: the original tested `< 3`, which let rank-3 input through
        # unexpanded and crashed tf.nn.conv2d.)
        if len(input.shape) < 4:
            input = tf.expand_dims(input, -1)
        num_filters = 100
        filter_sizes = [2, 3, 5]
        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.variable_scope("cnn-pooling-%s" % filter_size):
                # Filter spans the full embedding width, so convolution only
                # slides over the time axis.
                filter_shape = [filter_size, self.embedding_size, 1, num_filters]
                _filter = tf.get_variable("filter-%s" % filter_size, shape=filter_shape, initializer=self.initializer)
                # conv output: [batch_size, seq_len - filter_size + 1, 1, num_filters]
                conv = tf.nn.conv2d(input, _filter, strides=[1, 1, 1, 1], padding='VALID', name='conv')
                conv = tf.contrib.layers.batch_norm(conv, is_training=self.is_training, scope='cnn_bn')

                # apply non-linearity
                b = tf.get_variable('b-%s' % filter_size, [num_filters])
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name='relu')

                # max-pool over the whole time axis -> [batch, 1, 1, num_filters]
                kernel_size = [1, self.seq_len - filter_size + 1, 1, 1]
                pooled = tf.nn.max_pool(h, ksize=kernel_size, strides=[1, 1, 1, 1], padding='VALID', name="pool")
                pooled_outputs.append(pooled)

        # combine features from every filter width
        num_features = num_filters * len(filter_sizes)
        hidden_output = tf.concat(pooled_outputs, axis=3)
        hidden_output = tf.reshape(hidden_output, [-1, num_features])  # [batch_size, num_features]

        if self.is_training:
            hidden_output = tf.nn.dropout(hidden_output, keep_prob=0.9)
        output = tf.layers.dense(hidden_output, num_features, activation=tf.nn.tanh, use_bias=True)

        return output

    def lstm_layer(self, inputs, inputs_len):
        '''Bidirectional LSTM encoder.

        :param inputs: [batch_size, seq_len, hidden_size]
        :param inputs_len: [batch_size] true lengths for sequence masking
        :return: outputs: [batch_size, seq_len, num_units*2]
                 fw_st, bw_st: LSTMStateTuples of shape ([batch_size, hidden_dim], [batch_size, hidden_dim])
        '''
        # Bug fix: the original read tf.shape(input) — the *builtin* input,
        # not the parameter — and LSTMCell requires a static int anyway.
        num_units = inputs.get_shape().as_list()[-1]
        with tf.variable_scope('lstm_layer', reuse=tf.AUTO_REUSE):
            cell_fw = tf.nn.rnn_cell.LSTMCell(num_units=num_units, initializer=self.initializer, state_is_tuple=True)
            cell_bw = tf.nn.rnn_cell.LSTMCell(num_units=num_units, initializer=self.initializer, state_is_tuple=True)

            (outputs, (fw_st, bw_st)) = tf.nn.bidirectional_dynamic_rnn(
                cell_fw=cell_fw,
                cell_bw=cell_bw,
                inputs=inputs,
                sequence_length=inputs_len,
                swap_memory=True,
                dtype=tf.float32)

            outputs = tf.concat(values=outputs, axis=-1)  # concatenate the forwards and backwards states

        return outputs, fw_st, bw_st

    def gru_layer(self, inputs, inputs_len):
        '''Bidirectional GRU encoder.

        :param inputs: [batch_size, seq_len, hidden_size]
        :param inputs_len: [batch_size] true lengths for sequence masking
        :return: outputs: ([batch_size, seq_len, num_units], [batch_size, seq_len, num_units])
                 state: ([batch_size, num_units], [batch_size, num_units])
        '''
        # Bug fix: same builtin-shadowing / dynamic-shape problem as lstm_layer.
        num_units = inputs.get_shape().as_list()[-1]
        with tf.variable_scope('gru_layer', reuse=tf.AUTO_REUSE):
            cell_fw = tf.nn.rnn_cell.GRUCell(num_units=num_units)
            cell_bw = tf.nn.rnn_cell.GRUCell(num_units=num_units)

            # dtype comes from `inputs` here; no explicit dtype needed.
            outputs, state = tf.nn.bidirectional_dynamic_rnn(
                cell_fw=cell_fw,
                cell_bw=cell_bw,
                inputs=inputs,
                sequence_length=inputs_len,
                time_major=False,
                swap_memory=True)

        return outputs, state

    def _reduce_states(self, fw_st, bw_st):
        """Add to the graph a linear layer to reduce the encoder's final FW and BW state into a single initial state for the decoder.
           This is needed because the encoder is bidirectional but the decoder is not.
        Args:
          fw_st: LSTMStateTuple with hidden_dim units.
          bw_st: LSTMStateTuple with hidden_dim units.

        Returns:
          state: LSTMStateTuple with hidden_dim units.
        """
        # Bug fix: tf.shape(fw_st) applied tf.shape to a state *tuple* and
        # produced a dynamic tensor; tf.get_variable needs a static int, so
        # read the static last dimension of the cell state instead.
        hidden_dim = fw_st.c.get_shape().as_list()[-1]
        with tf.variable_scope('reduce_final_st'):
            # Define weights and biases to reduce the cell and reduce the state
            w_reduce_c = tf.get_variable('w_reduce_c', [hidden_dim * 2, hidden_dim], dtype=tf.float32, initializer=self.trunc_norm_init)
            w_reduce_h = tf.get_variable('w_reduce_h', [hidden_dim * 2, hidden_dim], dtype=tf.float32, initializer=self.trunc_norm_init)
            bias_reduce_c = tf.get_variable('bias_reduce_c', [hidden_dim], dtype=tf.float32, initializer=self.trunc_norm_init)
            bias_reduce_h = tf.get_variable('bias_reduce_h', [hidden_dim], dtype=tf.float32, initializer=self.trunc_norm_init)

            # Apply linear layer
            old_c = tf.concat(axis=1, values=[fw_st.c, bw_st.c])  # Concatenation of fw and bw cell
            old_h = tf.concat(axis=1, values=[fw_st.h, bw_st.h])  # Concatenation of fw and bw state
            new_c = tf.nn.relu(tf.matmul(old_c, w_reduce_c) + bias_reduce_c)  # Get new cell from old cell
            new_h = tf.nn.relu(tf.matmul(old_h, w_reduce_h) + bias_reduce_h)  # Get new state from old state
            return tf.contrib.rnn.LSTMStateTuple(new_c, new_h)  # Return new cell and state


def loss_binary_classifier(logits, target, l2_lambda=0.0001):
    """Softmax cross-entropy loss plus L2 weight regularization.

    :param logits: [batch_size, num_classes] unnormalized scores
    :param target: [batch_size] int class labels
    :param l2_lambda: scale for the L2 penalty over non-bias trainables
    :return: scalar loss tensor
    """
    with tf.variable_scope('loss'):
        per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=target, logits=logits)
        data_loss = tf.reduce_mean(per_example)
        # Penalize every trainable variable except biases.
        weight_penalties = [tf.nn.l2_loss(v)
                            for v in tf.trainable_variables()
                            if 'bias' not in v.name]
        reg_loss = tf.add_n(weight_penalties) * l2_lambda

    return data_loss + reg_loss


def loss_multilabel_classifier(logits, target, l2_lambda=0.0001):
    """Per-label sigmoid cross-entropy loss plus L2 weight regularization.

    :param logits: [batch_size, num_labels] unnormalized scores
    :param target: [batch_size, num_labels] multi-hot float targets
    :param l2_lambda: scale for the L2 penalty over non-bias trainables
    :return: scalar loss tensor
    """
    with tf.variable_scope('multi-loss'):
        per_label = tf.nn.sigmoid_cross_entropy_with_logits(
            labels=target, logits=logits)
        # Sum over labels, then average over the batch.
        data_loss = tf.reduce_mean(tf.reduce_sum(per_label, axis=1))
        # Penalize every trainable variable except biases.
        weight_penalties = [tf.nn.l2_loss(v)
                            for v in tf.trainable_variables()
                            if 'bias' not in v.name]
        reg_loss = tf.add_n(weight_penalties) * l2_lambda

    return data_loss + reg_loss
