import tensorflow as tf

import attention
import config
import model_base
import var_cnn_util

# Channel progression of the 10-layer CNN stack: each adjacent pair
# (c_in, c_out) becomes one 3x3 conv kernel spec [h, w, c_in, c_out].
_channel_progression = [1, 8, 8, 16, 16, 24, 32, 40, 48, 56, 64]
cnn_kernels = [[3, 3, c_in, c_out]
               for c_in, c_out in zip(_channel_progression,
                                      _channel_progression[1:])]

# If True, pool with the length-aware var_max_pool (halves both time and
# frequency and updates seq_lens); if False, use plain max_pool over the
# frequency axis only.
is_pooling_cnn = False
# Apply layer normalization after every conv (+ optional pool) stage.
is_layer_norm = True

# Hidden units of the first fully-connected layer.
fc_hidden = 64

# Insert a pooling stage after every this-many conv layers (skipping layer 0).
max_pool_interval = 2


class AttentionCNNModel(model_base.BaseModel):
    """CNN feature extractor + attention pooling + 2-layer FC classifier.

    Pipeline (see ``model``): a stack of length-aware 3x3 convolutions
    (``cnn``), attention pooling over the time axis, then a two-layer
    fully-connected head (``fc``) producing per-emotion logits.
    The conv stack is split across two GPUs (layers 0-5 on GPU:1,
    the rest and the FC head on GPU:2).
    """

    def var_conv2d_relu(self, inputs, w_conv, b_conv, seq_length):
        """Length-aware SAME 2-D convolution followed by ReLU.

        Args:
            inputs: 4-D feature map tensor (NHWC layout assumed by the
                [1, 1, 1, 1] strides — TODO confirm against var_cnn_util).
            w_conv: convolution kernel variable.
            b_conv: bias variable.
            seq_length: per-example valid lengths along the time axis.

        Returns:
            (activations, new_seq_len) — ReLU of the conv output and the
            updated valid lengths.
        """
        # NOTE(review): var_cov2d looks like a typo for var_conv2d, but the
        # name must match what var_cnn_util actually exports.
        cnn_outputs, new_seq_len = var_cnn_util.var_cov2d(inputs, w_conv, strides=[1, 1, 1, 1],
                                                          padding='SAME', bias=b_conv,
                                                          seq_length=seq_length)
        return tf.nn.relu(cnn_outputs), new_seq_len

    def var_max_pool2x2(self, inputs, seq_length):
        """Length-aware 2x2 max pooling (stride 2); halves time and frequency
        dimensions and returns the updated sequence lengths."""
        return var_cnn_util.var_max_pool(inputs, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                                         padding='SAME',
                                         seq_length=seq_length)

    def cnn(self, inputs, seq_lens):
        """Run the convolutional stack defined by ``cnn_kernels``.

        Args:
            inputs: 3-D tensor, presumably (batch, time, freq) — a channel
                axis is added below; TODO confirm against the caller.
            seq_lens: per-example valid lengths along the time axis.

        Returns:
            (h_cnn, seq_lens): features flattened to
            (batch, time, freq * channels) and the updated lengths.
        """
        inputs = tf.expand_dims(inputs, 3)  # add channel dim: NHW -> NHWC
        h_conv = inputs
        with tf.name_scope('conv'):
            for i, cnn_kernel in enumerate(cnn_kernels):
                # Model parallelism: first six conv layers on GPU:1, the
                # remainder on GPU:2 (matching ``fc`` placement in ``model``).
                if i <= 5:
                    dev_str = '/device:GPU:1'
                else:
                    dev_str = '/device:GPU:2'
                with tf.device(dev_str):
                    w_conv = self.weight_variable(cnn_kernel)
                    b_conv = self.bias_variable(cnn_kernel[-1:])
                    h_conv, seq_lens = self.var_conv2d_relu(h_conv, w_conv, b_conv, seq_lens)
                    # Pool every ``max_pool_interval`` layers, skipping layer 0.
                    if i % max_pool_interval == 0 and i > 0:
                        if is_pooling_cnn:
                            # Length-aware pooling over time and frequency.
                            h_conv, seq_lens = self.var_max_pool2x2(h_conv, seq_lens)
                        else:
                            # Pool only the frequency axis, so seq_lens
                            # stay valid for the time axis.
                            h_conv = tf.nn.max_pool(value=h_conv, ksize=[1, 1, 2, 1],
                                                    strides=[1, 1, 2, 1],
                                                    padding='SAME')
                    if is_layer_norm:
                        h_conv = tf.contrib.layers.layer_norm(h_conv)
        # Merge frequency and channel axes; time stays dynamic (-1).
        h_cnn = tf.reshape(h_conv,
                           [tf.shape(h_conv)[0], -1, h_conv.shape[2] * h_conv.shape[3]])
        return h_cnn, seq_lens

    def fc(self, inputs):
        """Two-layer fully-connected head with dropout.

        Args:
            inputs: 2-D tensor (batch, features).

        Returns:
            Logits of shape (batch, len(config.emos)).
        """
        output_d = len(config.emos)
        inputs_d = inputs.get_shape().as_list()[1]
        with tf.name_scope('fc1'):
            w_fc1 = self.weight_variable([inputs_d, fc_hidden])
            b_fc1 = self.bias_variable([fc_hidden])
            h_fc1 = tf.nn.relu(tf.matmul(inputs, w_fc1) + b_fc1)
        with tf.name_scope('dropout'):
            # fc_dropout_kprob is a keep probability fed at run time
            # (defined on the base model — TODO confirm).
            h_fc1_drop = tf.nn.dropout(h_fc1, self.fc_dropout_kprob)
        with tf.name_scope('fc2'):
            w_fc2 = self.weight_variable([fc_hidden, output_d])
            b_fc2 = self.bias_variable([output_d])
            logits = tf.matmul(h_fc1_drop, w_fc2) + b_fc2
        return logits

    def model(self, inputs, seq_lens):
        """Full forward pass: CNN -> attention pooling -> FC logits.

        The attention weights (alphas) are computed but not returned;
        only the logits are.
        """
        h_cnn, seq_lens = self.cnn(inputs, seq_lens)
        h_attention, alphas = attention.attention(h_cnn, attention_hidden_size=20,
                                                  seq_lens=seq_lens)
        with tf.device('/device:GPU:2'):
            logits = self.fc(h_attention)
        return logits
