import tensorflow as tf

import config
import model_base
import var_cnn_util
import attention


# Conv kernel shapes, one per stage, as [height, width, in_channels, out_channels];
# channels chain 1 -> 8 -> 8 -> 16 -> 16, and the last entry doubles as the bias size.
cnn1_kernel = [3, 3, 1, 8]
cnn2_kernel = [3, 3, 8, 8]
cnn3_kernel = [3, 3, 8, 16]
cnn4_kernel = [3, 3, 16, 16]
# When True, each conv stage is followed by a 2x2 max pool (halves time and feature axes).
is_pooling_cnn = True


class AttentionCNN4Model(model_base.BaseModel):
    """Four-stage variable-length CNN + attention pooling + two-layer FC classifier.

    Pipeline (see ``model``): 4 x (conv2d+ReLU [+ 2x2 max-pool]) over a
    (batch, time, freq) input, flatten the non-time axes, attention-pool over
    time, then a 2-layer fully-connected head producing per-emotion logits.
    Weight/bias creation (``weight_variable`` / ``bias_variable``) comes from
    ``model_base.BaseModel``.
    """

    def var_conv2d_relu(self, inputs, w_conv, b_conv, seq_length):
        """Length-aware SAME 2-D convolution followed by ReLU.

        Args:
            inputs: 4-D feature tensor (batch, time, freq, channels).
            w_conv: conv kernel, shape [h, w, in_ch, out_ch].
            b_conv: bias, shape [out_ch].
            seq_length: per-example valid lengths along the time axis.

        Returns:
            (activations, new_seq_length) as produced by
            ``var_cnn_util.var_cov2d`` with ReLU applied.
        """
        cnn_outputs, new_seq_len = var_cnn_util.var_cov2d(
            inputs, w_conv, strides=[1, 1, 1, 1], padding='SAME',
            bias=b_conv, seq_length=seq_length)
        return tf.nn.relu(cnn_outputs), new_seq_len

    def var_max_pool2x2(self, inputs, seq_length):
        """Length-aware 2x2 max pool (stride 2) — halves time and freq axes."""
        return var_cnn_util.var_max_pool(
            inputs, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
            padding='SAME', seq_length=seq_length)

    def _conv_block(self, inputs, seq_lens, kernel_shape, idx):
        """One conv+ReLU stage (scope ``conv{idx}``) plus an optional 2x2
        max-pool stage (scope ``pool{idx}``) when ``is_pooling_cnn`` is set.

        Returns (features, updated seq_lens).
        """
        with tf.name_scope('conv%d' % idx):
            w_conv = self.weight_variable(kernel_shape)
            # Bias size equals the kernel's out_channels.
            b_conv = self.bias_variable(kernel_shape[-1:])
            h, seq_lens = self.var_conv2d_relu(inputs, w_conv, b_conv, seq_lens)
        if is_pooling_cnn:
            with tf.name_scope('pool%d' % idx):
                h, seq_lens = self.var_max_pool2x2(h, seq_lens)
        return h, seq_lens

    def cnn(self, inputs, seq_lens):
        """Run the four conv stages and flatten the non-time axes.

        Args:
            inputs: 3-D tensor (batch, time, freq); a channel axis is added here.
            seq_lens: per-example valid lengths along the time axis.

        Returns:
            (h_cnn, seq_lens): h_cnn is (batch, time', freq' * channels'),
            seq_lens are the lengths after any pooling.
        """
        h = tf.expand_dims(inputs, 3)  # add channel axis -> (batch, time, freq, 1)
        kernels = [cnn1_kernel, cnn2_kernel, cnn3_kernel, cnn4_kernel]
        # Scope indices start at 1 to keep the original conv1..conv4 /
        # pool1..pool4 graph names.
        for idx, kernel in enumerate(kernels, start=1):
            h, seq_lens = self._conv_block(h, seq_lens, kernel, idx)
        # Flatten (freq', channels') into one feature axis; time axis stays
        # dynamic (-1), batch size is taken from the runtime shape.
        h_cnn = tf.reshape(h, [tf.shape(h)[0], -1, h.shape[2] * h.shape[3]])
        return h_cnn, seq_lens

    def fc(self, inputs):
        """Two-layer FC head with dropout; returns unnormalized logits.

        Args:
            inputs: 2-D tensor (batch, inputs_d).

        Returns:
            logits of shape (batch, len(config.emos)).
        """
        output_d = len(config.emos)
        # NOTE(review): assumes the attention output width equals
        # config.rnn_hidden_size * 2 (inherited from the removed RNN path) —
        # confirm this matches attention.attention's output dimension.
        inputs_d = config.rnn_hidden_size * 2
        with tf.name_scope('fc1'):
            w_fc1 = self.weight_variable([inputs_d, config.fc1_b])
            b_fc1 = self.bias_variable([config.fc1_b])
            h_fc1 = tf.nn.relu(tf.matmul(inputs, w_fc1) + b_fc1)
        with tf.name_scope('dropout'):
            # fc_dropout_kprob is the KEEP probability (TF1 dropout semantics).
            h_fc1_drop = tf.nn.dropout(h_fc1, self.fc_dropout_kprob)
        with tf.name_scope('fc2'):
            w_fc2 = self.weight_variable([config.fc1_b, output_d])
            b_fc2 = self.bias_variable([output_d])
            logits = tf.matmul(h_fc1_drop, w_fc2) + b_fc2
        return logits

    def model(self, inputs, seq_lens):
        """Full forward pass: CNN -> attention pooling -> FC logits."""
        h_cnn, seq_lens = self.cnn(inputs, seq_lens)
        # Attention pools the variable-length time axis to one vector per
        # example; alphas (attention weights) are discarded here.
        h_attention, alphas = attention.attention(
            h_cnn, attention_hidden_size=20, seq_lens=seq_lens)
        logits = self.fc(h_attention)
        return logits

