import tensorflow as tf

class BaseModel(object):
    """Skeleton for a TF 1.x graph-mode sequence model.

    Builds input placeholders, delegates the forward graph to subclasses,
    and wires up an SGD training op with global-norm gradient clipping and
    exponential learning-rate decay.

    Subclasses must replace the ``...`` stubs: the target ops in
    ``_add_ops`` and ``self.prob`` / ``self.loss`` in ``_build_networks``.
    """

    def __init__(self, kwargs):
        # NOTE: ``kwargs`` is a plain dict passed positionally (not **kwargs);
        # the signature is kept for caller compatibility.
        self.num_steps = kwargs['num_steps']
        self.lr = kwargs['lr']
        self.dropout_rate = kwargs['dropout']
        self.max_grad_norm = kwargs['max_grad_norm']

        self._add_ops()
        with tf.device('/gpu:0'):
            self._build_networks()
            self.add_train_op()

    def _add_ops(self):
        """Add placeholders to the graph. These are entry points for any input data."""
        self.input = tf.placeholder(tf.int32, shape=(None, self.num_steps), name='input')
        self.input_len = tf.placeholder(tf.int32, [None], name='input_len')
        self.input_mask = tf.placeholder(tf.float32, [None, self.num_steps], name='input_mask')

        # Target-side ops are deliberately left as stubs for subclasses.
        self.target = ...
        self.target_len = ...
        self.target_mask = ...

    def _build_networks(self):
        """Build the forward graph; subclasses must define ``prob`` and ``loss``."""
        self.prob = ...
        self.loss = ...

    def add_train_op(self):
        """Create ``self.train_op``: plain SGD on clipped gradients with an
        exponentially decayed learning rate (decays by 0.97 every 1e4 steps)."""
        tvars = tf.trainable_variables()
        gradients = tf.gradients(self.loss, tvars)

        with tf.device('/gpu:0'):
            # Clip by the global norm across all gradients; the returned
            # global norm itself is not needed.
            grads, _global_norm = tf.clip_by_global_norm(gradients, self.max_grad_norm)

        self.global_step = tf.Variable(0, name='global_step', trainable=False)
        learning_rate = tf.train.exponential_decay(learning_rate=self.lr,
                                                   global_step=self.global_step,
                                                   decay_steps=1e4,
                                                   decay_rate=0.97)
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)

        with tf.device('/gpu:0'):
            self.train_op = optimizer.apply_gradients(zip(grads, tvars),
                                                      global_step=self.global_step,
                                                      name='train_step')

    def _make_feed_dict(self, batch, is_eval=False):
        """Map a batch's arrays onto the input placeholders.

        Targets are fed only at train time (``is_eval=False``).
        NOTE(review): ``target_len`` / ``target_mask`` are never fed here —
        confirm the subclass loss does not read them at train time.
        """
        feed_dict = {
            self.input: batch.input,
            self.input_len: batch.input_len,
            self.input_mask: batch.input_mask,
        }
        if not is_eval:
            feed_dict[self.target] = batch.target
        return feed_dict

    def train_step(self, sess, batch):
        """Run one optimization step; returns dict with loss and global_step."""
        feed_dict = self._make_feed_dict(batch)
        fetches = {'train_op': self.train_op,
                   'loss': self.loss,
                   'global_step': self.global_step
                  }
        return sess.run(fetches, feed_dict)

    def predict(self, sess, batch):
        """Evaluate ``self.prob`` for a batch; returns a one-element list."""
        feed_dict = self._make_feed_dict(batch, True)
        return sess.run([self.prob], feed_dict)


def classifier(output, num_labels, labels, is_training=True, dropout_keep_prob=0.9):
    """Linear softmax classification head with cross-entropy loss.

    Args:
        output: float tensor of shape ``[batch, hidden_size]`` — presumably a
            pooled encoder output; confirm against callers.
        num_labels: number of output classes.
        labels: int tensor of shape ``[batch]`` with class ids in
            ``[0, num_labels)``.
        is_training: apply dropout to ``output`` when True.
        dropout_keep_prob: keep probability for training-time dropout
            (default 0.9, matching the previous hard-coded value).

    Returns:
        Tuple ``(loss, per_example_loss, logits, probabilities)`` where
        ``loss`` is the mean cross-entropy over the batch.
    """
    hidden_size = output.shape[-1].value

    output_weight = tf.get_variable('output_weights', [num_labels, hidden_size],
                                    initializer=tf.truncated_normal_initializer(stddev=0.02))
    output_bias = tf.get_variable('output_bias', [num_labels],
                                  initializer=tf.zeros_initializer())

    with tf.variable_scope('loss'):
        if is_training:
            output = tf.nn.dropout(output, keep_prob=dropout_keep_prob)

        # Project onto the label space: weights are stored [num_labels, hidden],
        # so multiply with transpose_b.
        logits = tf.matmul(output, output_weight, transpose_b=True)
        logits = tf.nn.bias_add(logits, output_bias)
        probabilities = tf.nn.softmax(logits, axis=-1)
        log_probs = tf.nn.log_softmax(logits, axis=-1)

        one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)

        # Cross-entropy: negative log-probability of the true class.
        per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
        loss = tf.reduce_mean(per_example_loss)

        return (loss, per_example_loss, logits, probabilities)