from __future__ import absolute_import
from __future__ import print_function
from base.base_model import BaseModel

from keras.models import Model
from keras.layers import Input, Flatten, Dense, Lambda, Embedding, Conv1D, MaxPooling1D, concatenate, LSTM, \
    Bidirectional, Dot, BatchNormalization, Dropout, Average, dot, Activation, GlobalMaxPool1D, GlobalAvgPool1D,\
    Permute, Multiply, Add, Merge, TimeDistributed
from keras.activations import softmax
from keras.optimizers import Adadelta, Adam, RMSprop
from keras.regularizers import l2

from keras import backend as K


from sklearn.ensemble import ExtraTreesClassifier
from xgboost import XGBClassifier

import numpy as np
import os


def exponent_neg_manhattan_distance(vectors):
    """Similarity in (0, 1]: exp(-L1 distance) between a pair of batched vectors."""
    left, right = vectors
    l1_distance = K.sum(K.abs(left - right), axis=1, keepdims=True)
    return K.exp(-l1_distance)


def euclidean_distance(vectors):
    """Batched L2 distance; the squared sum is floored at K.epsilon() so sqrt stays stable."""
    left, right = vectors
    squared_l2 = K.sum(K.square(left - right), axis=1, keepdims=True)
    return K.sqrt(K.maximum(squared_l2, K.epsilon()))


def eucl_dist_output_shape(shapes):
    """Keras `output_shape` helper: the distance collapses features to one column."""
    first_shape = shapes[0]
    return first_shape[0], 1


def contrastive_loss(y_true, y_pred):
    """Contrastive loss with margin 1: positives (y_true=1) are pulled together,
    negatives are pushed apart until their distance exceeds the margin.
    """
    margin = 1
    positive_term = y_true * K.square(y_pred)
    negative_term = (1 - y_true) * K.square(K.maximum(margin - y_pred, 0))
    return K.mean(positive_term + negative_term)


def cosine_loss(y_true, y_pred):
    """Contrastive-style loss on cosine similarity: positives are driven toward
    similarity 1, negatives are penalised once similarity exceeds the 0.2 margin.
    """
    margin = 0.2
    positive_term = y_true * (K.square(1 - y_pred) / 4)
    negative_term = (1 - y_true) * K.square(K.maximum(y_pred - margin, 0))
    return K.mean(positive_term + negative_term)


def acc(y_true, y_pred):
    """Accuracy metric for distance outputs: a distance below 0.65 counts as a match."""
    predicted_match = K.cast(y_pred < 0.65, y_true.dtype)
    return K.mean(K.equal(y_true, predicted_match))


# def cosine_acc(y_true, y_pred):
#     return K.mean(K.equal(y_true, K.cast(y_pred > 0, y_true.dtype)))


def attention_3d_block(hidden_states):
    """Luong-style multiplicative attention over an RNN's hidden-state sequence.

    Scores every timestep against the last hidden state (the query), softmax-
    normalises the scores, and returns a 128-dim tanh projection of the context
    vector concatenated with that last hidden state.
    """
    # hidden_states.shape = (batch_size, time_steps, hidden_size)
    hidden_size = int(hidden_states.shape[2])
    # Inside dense layer
    #              hidden_states            dot               W            =>           score_first_part
    # (batch_size, time_steps, hidden_size) dot (hidden_size, hidden_size) => (batch_size, time_steps, hidden_size)
    # W is the trainable weight matrix of attention
    # Luong's multiplicative style score
    score_first_part = Dense(hidden_size, use_bias=False, name='attention_score_vec')(hidden_states)
    #            score_first_part           dot        last_hidden_state     => attention_weights
    # (batch_size, time_steps, hidden_size) dot   (batch_size, hidden_size)  => (batch_size, time_steps)
    # h_t: hidden state of the final timestep, used as the attention query.
    h_t = Lambda(lambda x: x[:, -1, :], output_shape=(hidden_size,), name='last_hidden_state')(
        hidden_states)
    score = dot([score_first_part, h_t], [2, 1], name='attention_score')
    # Normalise the per-timestep scores into attention weights.
    attention_weights = Activation('softmax', name='attention_weight')(score)
    # (batch_size, time_steps, hidden_size) dot (batch_size, time_steps) => (batch_size, hidden_size)
    context_vector = dot([hidden_states, attention_weights], [1, 1], name='context_vector')
    pre_activation = concatenate([context_vector, h_t], name='attention_output')
    # Project the [context; query] concatenation down to a fixed 128-dim vector.
    attention_vector = Dense(128, use_bias=False, activation='tanh',
                             name='attention_vector')(
        pre_activation)
    return attention_vector


class CNNSiameseModel(BaseModel):
    """Siamese network with a multi-width CNN sentence encoder, trained with
    contrastive loss on a euclidean distance head.
    """

    def __init__(self, config, vocabulary):
        super(CNNSiameseModel, self).__init__(config)
        self.vocabulary = vocabulary
        # Pre-trained 100-dim embeddings; rows must align with vocabulary indices.
        self.weights = np.load(os.path.join(self.config.embedding_path, 'atec_task1_100_dim.embeddings'))
        self.max_len = config.max_len
        self.build_model()

    def create_base_network(self):
        """Shared sentence encoder: parallel Conv1D towers (widths 1-4) -> dense.

        Returns a Model mapping an id sequence of length max_len to a 128-dim vector.
        """
        filter_lengths = [1, 2, 3, 4]
        input_seqs = Input(shape=(self.max_len,))
        # +2 reserves ids beyond the vocabulary (e.g. padding / OOV).
        embedding_layer = Embedding(len(self.vocabulary) + 2, self.config.embedding_dim,
                                    weights=[self.weights])(input_seqs)
        conv_layers = []
        for filter_length in filter_lengths:
            conv_layer = Conv1D(filters=100, kernel_size=filter_length, padding='valid',
                                activation='relu', strides=1)(embedding_layer)
            # Pool over the full (valid) sequence so each tower yields one 100-dim vector.
            maxpooling = MaxPooling1D(pool_size=self.max_len - filter_length + 1)(conv_layer)
            conv_layers.append(Flatten()(maxpooling))
        concatenate_layer = concatenate(inputs=conv_layers)
        dense_layer = Dense(128, activation='relu')(concatenate_layer)
        seqs_vector = Dense(128, activation='relu')(dense_layer)
        return Model(input_seqs, seqs_vector)

    def build_model(self):
        """Create and compile the siamese model.

        Reuses build_model_without_compile so the graph is defined exactly once
        (the two methods previously duplicated the whole construction).
        """
        self.model = self.build_model_without_compile()
        self.model.compile(loss=contrastive_loss,
                           optimizer=self.config.optimizer,
                           metrics=[acc])

    def build_model_without_compile(self):
        """Return the uncompiled siamese model; both branches share one encoder."""
        input_a = Input(shape=(self.config.max_len,))
        input_b = Input(shape=(self.config.max_len,))
        base_network = self.create_base_network()
        vector_a = base_network(input_a)
        vector_b = base_network(input_b)
        distance = Lambda(euclidean_distance, output_shape=eucl_dist_output_shape)([vector_a, vector_b])
        return Model([input_a, input_b], distance)


class BiLSTMSiameseModel(BaseModel):
    """Siamese BiLSTM encoder with attention pooling, trained with contrastive loss."""

    def __init__(self, config, vocabulary):
        super(BiLSTMSiameseModel, self).__init__(config)
        self.vocabulary = vocabulary
        # Pre-trained 100-dim embeddings; rows must align with vocabulary indices.
        self.weights = np.load(os.path.join(self.config.embedding_path, 'atec_task1_100_dim.embeddings'))
        self.max_len = config.max_len
        self.build_model()

    def create_base_network(self):
        """Shared encoder: masked embedding -> BiLSTM -> attention -> dense."""
        input_seqs = Input(shape=(self.max_len,))
        # +2 reserves ids beyond the vocabulary (e.g. padding / OOV).
        embedding_layer = Embedding(len(self.vocabulary) + 2,
                                    self.config.embedding_dim,
                                    weights=[self.weights],
                                    mask_zero=True)(input_seqs)
        bilstm_layer = Bidirectional(LSTM(128, return_sequences=True))(embedding_layer)
        # Collapse the hidden-state sequence into one vector via Luong attention.
        bilstm_layer = attention_3d_block(bilstm_layer)
        dense_layer = Dense(128, activation='relu')(bilstm_layer)
        seqs_vector = Dense(128, activation='relu')(dense_layer)
        return Model(input_seqs, seqs_vector)

    def build_model(self):
        """Create and compile the siamese model.

        Reuses build_model_without_compile so the graph is defined exactly once
        (the two methods previously duplicated the whole construction).
        """
        self.model = self.build_model_without_compile()
        self.model.compile(loss=contrastive_loss,
                           optimizer=self.config.optimizer,
                           metrics=[acc])

    def build_model_without_compile(self):
        """Return the uncompiled siamese model; both branches share one encoder."""
        input_a = Input(shape=(self.config.max_len,))
        input_b = Input(shape=(self.config.max_len,))
        base_network = self.create_base_network()
        vector_a = base_network(input_a)
        vector_b = base_network(input_b)
        distance = Lambda(euclidean_distance, output_shape=eucl_dist_output_shape)([vector_a, vector_b])
        return Model([input_a, input_b], distance)


class CNNBiLSTMSiameseModel(BaseModel):
    """Siamese encoder stacking Conv1D towers on top of a BiLSTM composition,
    trained with contrastive loss on a euclidean distance head.
    """

    def __init__(self, config, vocabulary):
        super(CNNBiLSTMSiameseModel, self).__init__(config)
        self.vocabulary = vocabulary
        # Pre-trained 100-dim embeddings; rows must align with vocabulary indices.
        self.weights = np.load(os.path.join(self.config.embedding_path, 'atec_task1_100_dim.embeddings'))
        self.build_model()

    def create_base_network(self):
        """Shared encoder: Conv1D towers (widths 1-4) -> concat -> BiLSTM -> dense."""
        filter_lengths = [1, 2, 3, 4]
        input_seqs = Input(shape=(self.config.max_len,))
        # +2 reserves ids beyond the vocabulary (e.g. padding / OOV).
        embedding_layer = Embedding(len(self.vocabulary) + 2, self.config.embedding_dim,
                                    weights=[self.weights])(input_seqs)
        conv_layers = []
        for filter_length in filter_lengths:
            conv_layer = Conv1D(filters=100, kernel_size=filter_length, padding='valid',
                                activation='relu', strides=1)(embedding_layer)
            # Keep the pooled output 3-D (no Flatten) so the towers feed the BiLSTM.
            maxpooling = MaxPooling1D(pool_size=self.config.max_len - filter_length + 1)(conv_layer)
            conv_layers.append(maxpooling)
        concatenate_layer = concatenate(inputs=conv_layers)
        bilstm_layer = Bidirectional(LSTM(128))(concatenate_layer)
        dense_layer = Dense(128, activation='relu')(bilstm_layer)
        seqs_vector = Dense(128, activation='relu')(dense_layer)
        return Model(input_seqs, seqs_vector)

    def build_model(self):
        """Create and compile the siamese model.

        Reuses build_model_without_compile so the graph is defined exactly once
        (the two methods previously duplicated the whole construction).
        """
        self.model = self.build_model_without_compile()
        self.model.compile(loss=contrastive_loss,
                           optimizer=self.config.optimizer,
                           metrics=[acc])

    def build_model_without_compile(self):
        """Return the uncompiled siamese model; both branches share one encoder."""
        input_a = Input(shape=(self.config.max_len,))
        input_b = Input(shape=(self.config.max_len,))
        base_network = self.create_base_network()
        vector_a = base_network(input_a)
        vector_b = base_network(input_b)
        distance = Lambda(euclidean_distance, output_shape=eucl_dist_output_shape)([vector_a, vector_b])
        return Model([input_a, input_b], distance)


class SiameseCategoricalModel(BaseModel):
    """Siamese BiLSTM classifier: encodes both sentences, concatenates the two
    vectors plus their cosine similarity, and predicts a match probability.
    """

    def __init__(self, config, vocabulary):
        super(SiameseCategoricalModel, self).__init__(config)
        self.vocabulary = vocabulary
        # Pre-trained 100-dim embeddings; rows must align with vocabulary indices.
        self.weights = np.load(os.path.join(self.config.embedding_path, 'atec_task1_100_dim.embeddings'))
        self.build_model()

    def create_base_network(self):
        """Shared encoder: masked embedding -> BiLSTM -> dense projection."""
        input_seqs = Input(shape=(self.config.max_len,))
        # +2 reserves ids beyond the vocabulary (e.g. padding / OOV).
        embedding_layer = Embedding(len(self.vocabulary) + 2,
                                    self.config.embedding_dim,
                                    weights=[self.weights],
                                    mask_zero=True)(input_seqs)
        bilstm_layer = Bidirectional(LSTM(128))(embedding_layer)
        dense_layer = Dense(128, activation='relu')(bilstm_layer)
        seqs_vector = Dense(128, activation='relu')(dense_layer)
        return Model(input_seqs, seqs_vector)

    def build_model(self):
        """Create and compile the classifier.

        Reuses build_model_without_compile so the graph is defined exactly once
        (the two methods previously duplicated the whole construction).
        """
        self.model = self.build_model_without_compile()
        self.model.compile(loss='binary_crossentropy',
                           optimizer=self.config.optimizer,
                           metrics=['acc'])

    def build_model_without_compile(self):
        """Return the uncompiled classifier over a (sentence_a, sentence_b) pair."""
        input_a = Input(shape=(self.config.max_len,))
        input_b = Input(shape=(self.config.max_len,))
        base_network = self.create_base_network()
        vector_a = base_network(input_a)
        vector_b = base_network(input_b)
        # normalize=True makes the dot product a cosine similarity feature.
        vector_dot = Dot(axes=1, normalize=True)([vector_a, vector_b])
        vector_merged = concatenate(inputs=[vector_a, vector_b, vector_dot])
        dense_layer_1 = Dense(128, activation='relu')(vector_merged)
        dense_layer_2 = Dense(64, activation='relu')(dense_layer_1)
        output_layer = Dense(1, activation='sigmoid')(dense_layer_2)
        return Model([input_a, input_b], output_layer)


class SiameseMixedCNNDistanceModel(BaseModel):
    """Siamese distance model over word- and char-level inputs, each encoded by
    an identical multi-width CNN, trained with contrastive loss.
    """

    def __init__(self, config, vocabulary_word, vocabulary_char):
        super(SiameseMixedCNNDistanceModel, self).__init__(config)
        self.vocabulary_word = vocabulary_word
        self.vocabulary_char = vocabulary_char
        # Pre-trained 100-dim embeddings for each granularity.
        self.weights_word = np.load(os.path.join(self.config.embedding_path_word, 'atec_task1_100_dim.embeddings'))
        self.weights_char = np.load(os.path.join(self.config.embedding_path_char, 'atec_task1_100_dim.embeddings'))
        self.build_model()

    def _create_cnn_base_network(self, max_len, vocabulary_size, weights):
        """Shared CNN encoder: parallel Conv1D towers (widths 1-4) -> dense.

        The word/char variants previously duplicated this code verbatim.
        """
        filter_lengths = [1, 2, 3, 4]
        input_seqs = Input(shape=(max_len,))
        # +2 reserves ids beyond the vocabulary (e.g. padding / OOV).
        embedding_layer = Embedding(vocabulary_size + 2, self.config.embedding_dim,
                                    weights=[weights])(input_seqs)
        conv_layers = []
        for filter_length in filter_lengths:
            conv_layer = Conv1D(filters=100, kernel_size=filter_length, padding='valid',
                                activation='relu', strides=1)(embedding_layer)
            # Pool over the full sequence so each tower yields one 100-dim vector.
            maxpooling = MaxPooling1D(pool_size=max_len - filter_length + 1)(conv_layer)
            conv_layers.append(Flatten()(maxpooling))
        concatenate_layer = concatenate(inputs=conv_layers)
        dense_layer = Dense(128, activation='relu')(concatenate_layer)
        seqs_vector = Dense(128, activation='relu')(dense_layer)
        return Model(input_seqs, seqs_vector)

    def create_base_network_word(self):
        """Word-level CNN encoder."""
        return self._create_cnn_base_network(self.config.max_len_word,
                                             len(self.vocabulary_word),
                                             self.weights_word)

    def create_base_network_char(self):
        """Char-level CNN encoder."""
        return self._create_cnn_base_network(self.config.max_len_char,
                                             len(self.vocabulary_char),
                                             self.weights_char)

    def build_model(self):
        """Create and compile the siamese model (graph built once in the
        uncompiled builder, instead of being duplicated here)."""
        self.model = self.build_model_without_compile()
        self.model.compile(loss=contrastive_loss,
                           optimizer=self.config.optimizer,
                           metrics=[acc])

    def build_model_without_compile(self):
        """Return the uncompiled siamese model over (word, char) input pairs.

        Each sentence's word and char vectors are concatenated before the
        euclidean distance is taken.
        """
        input_a_word = Input(shape=(self.config.max_len_word,))
        input_b_word = Input(shape=(self.config.max_len_word,))
        input_a_char = Input(shape=(self.config.max_len_char,))
        input_b_char = Input(shape=(self.config.max_len_char,))
        base_network_word = self.create_base_network_word()
        base_network_char = self.create_base_network_char()
        vector_a = concatenate([base_network_word(input_a_word), base_network_char(input_a_char)])
        vector_b = concatenate([base_network_word(input_b_word), base_network_char(input_b_char)])
        distance = Lambda(euclidean_distance, output_shape=eucl_dist_output_shape)([vector_a, vector_b])
        return Model([input_a_word, input_b_word, input_a_char, input_b_char], distance)

    # Backward-compatible alias for the original misspelled method name.
    build_mode_without_compile = build_model_without_compile


class SiameseMixedBiLSTMDistanceModel(BaseModel):
    """Siamese distance model over word- and char-level inputs, each encoded by
    an identical BiLSTM, trained with contrastive loss.
    """

    def __init__(self, config, vocabulary_word, vocabulary_char):
        super(SiameseMixedBiLSTMDistanceModel, self).__init__(config)
        self.vocabulary_word = vocabulary_word
        self.vocabulary_char = vocabulary_char
        # Pre-trained 100-dim embeddings for each granularity.
        self.weights_word = np.load(os.path.join(self.config.embedding_path_word, 'atec_task1_100_dim.embeddings'))
        self.weights_char = np.load(os.path.join(self.config.embedding_path_char, 'atec_task1_100_dim.embeddings'))
        self.build_model()

    def _create_bilstm_base_network(self, max_len, vocabulary_size, weights):
        """Shared BiLSTM encoder: masked embedding -> BiLSTM -> dense.

        The word/char variants previously duplicated this code verbatim.
        """
        input_seqs = Input(shape=(max_len,))
        # +2 reserves ids beyond the vocabulary (e.g. padding / OOV).
        embedding_layer = Embedding(vocabulary_size + 2,
                                    self.config.embedding_dim,
                                    weights=[weights],
                                    mask_zero=True)(input_seqs)
        bilstm_layer = Bidirectional(LSTM(128))(embedding_layer)
        dense_layer = Dense(128, activation='relu')(bilstm_layer)
        seqs_vector = Dense(128, activation='relu')(dense_layer)
        return Model(input_seqs, seqs_vector)

    def create_base_network_word(self):
        """Word-level BiLSTM encoder."""
        return self._create_bilstm_base_network(self.config.max_len_word,
                                                len(self.vocabulary_word),
                                                self.weights_word)

    def create_base_network_char(self):
        """Char-level BiLSTM encoder."""
        return self._create_bilstm_base_network(self.config.max_len_char,
                                                len(self.vocabulary_char),
                                                self.weights_char)

    def build_model(self):
        """Create and compile the siamese model (graph built once in the
        uncompiled builder, instead of being duplicated here)."""
        self.model = self.build_model_without_compile()
        self.model.compile(loss=contrastive_loss,
                           optimizer=self.config.optimizer,
                           metrics=[acc])

    def build_model_without_compile(self):
        """Return the uncompiled siamese model over (word, char) input pairs."""
        input_a_word = Input(shape=(self.config.max_len_word,))
        input_b_word = Input(shape=(self.config.max_len_word,))
        input_a_char = Input(shape=(self.config.max_len_char,))
        input_b_char = Input(shape=(self.config.max_len_char,))
        base_network_word = self.create_base_network_word()
        base_network_char = self.create_base_network_char()
        vector_a = concatenate([base_network_word(input_a_word), base_network_char(input_a_char)])
        vector_b = concatenate([base_network_word(input_b_word), base_network_char(input_b_char)])
        distance = Lambda(euclidean_distance, output_shape=eucl_dist_output_shape)([vector_a, vector_b])
        return Model([input_a_word, input_b_word, input_a_char, input_b_char], distance)


class SiameseMixedCNNBiLSTMDistanceModel(BaseModel):
    """Siamese distance model over word- and char-level inputs, each encoded by
    an identical CNN + BiLSTM stack, trained with contrastive loss.
    """

    def __init__(self, config, vocabulary_word, vocabulary_char):
        super(SiameseMixedCNNBiLSTMDistanceModel, self).__init__(config)
        self.vocabulary_word = vocabulary_word
        self.vocabulary_char = vocabulary_char
        # Pre-trained 100-dim embeddings for each granularity.
        self.weights_word = np.load(os.path.join(self.config.embedding_path_word, 'atec_task1_100_dim.embeddings'))
        self.weights_char = np.load(os.path.join(self.config.embedding_path_char, 'atec_task1_100_dim.embeddings'))
        self.build_model()

    def _create_cnn_bilstm_base_network(self, max_len, vocabulary_size, weights):
        """Shared encoder: Conv1D towers (widths 1-4) -> concat -> BiLSTM -> dense.

        The word/char variants previously duplicated this code verbatim.
        """
        filter_lengths = [1, 2, 3, 4]
        input_seqs = Input(shape=(max_len,))
        # +2 reserves ids beyond the vocabulary (e.g. padding / OOV).
        embedding_layer = Embedding(vocabulary_size + 2, self.config.embedding_dim,
                                    weights=[weights])(input_seqs)
        conv_layers = []
        for filter_length in filter_lengths:
            conv_layer = Conv1D(filters=100, kernel_size=filter_length, padding='valid',
                                activation='relu', strides=1)(embedding_layer)
            # Keep the pooled output 3-D (no Flatten) so the towers feed the BiLSTM.
            maxpooling = MaxPooling1D(pool_size=max_len - filter_length + 1)(conv_layer)
            conv_layers.append(maxpooling)
        concatenate_layer = concatenate(inputs=conv_layers)
        bilstm_layer = Bidirectional(LSTM(128))(concatenate_layer)
        dense_layer = Dense(128, activation='relu')(bilstm_layer)
        seqs_vector = Dense(128, activation='relu')(dense_layer)
        return Model(input_seqs, seqs_vector)

    def create_base_network_word(self):
        """Word-level CNN + BiLSTM encoder."""
        return self._create_cnn_bilstm_base_network(self.config.max_len_word,
                                                    len(self.vocabulary_word),
                                                    self.weights_word)

    def create_base_network_char(self):
        """Char-level CNN + BiLSTM encoder."""
        return self._create_cnn_bilstm_base_network(self.config.max_len_char,
                                                    len(self.vocabulary_char),
                                                    self.weights_char)

    def build_model(self):
        """Create and compile the siamese model (graph built once in the
        uncompiled builder, instead of being duplicated here)."""
        self.model = self.build_model_without_compile()
        self.model.compile(loss=contrastive_loss,
                           optimizer=self.config.optimizer,
                           metrics=[acc])

    def build_model_without_compile(self):
        """Return the uncompiled siamese model over (word, char) input pairs."""
        input_a_word = Input(shape=(self.config.max_len_word,))
        input_b_word = Input(shape=(self.config.max_len_word,))
        input_a_char = Input(shape=(self.config.max_len_char,))
        input_b_char = Input(shape=(self.config.max_len_char,))
        base_network_word = self.create_base_network_word()
        base_network_char = self.create_base_network_char()
        vector_a = concatenate([base_network_word(input_a_word), base_network_char(input_a_char)])
        vector_b = concatenate([base_network_word(input_b_word), base_network_char(input_b_char)])
        distance = Lambda(euclidean_distance, output_shape=eucl_dist_output_shape)([vector_a, vector_b])
        return Model([input_a_word, input_b_word, input_a_char, input_b_char], distance)


class SiameseMixedCategoricalModel(BaseModel):
    """Siamese classifier over word- and char-level inputs: BiLSTM encoders per
    granularity, concatenated with cosine-similarity features, sigmoid output.
    """

    def __init__(self, config, vocabulary_word, vocabulary_char):
        super(SiameseMixedCategoricalModel, self).__init__(config)
        self.vocabulary_word = vocabulary_word
        self.vocabulary_char = vocabulary_char
        # Pre-trained 100-dim embeddings for each granularity.
        self.weights_word = np.load(os.path.join(self.config.embedding_path_word, 'atec_task1_100_dim.embeddings'))
        self.weights_char = np.load(os.path.join(self.config.embedding_path_char, 'atec_task1_100_dim.embeddings'))
        self.build_model()

    def _create_bilstm_base_network(self, max_len, vocabulary_size, weights):
        """Shared BiLSTM encoder: masked embedding -> BiLSTM -> dense.

        The word/char variants previously duplicated this code verbatim.
        """
        input_seqs = Input(shape=(max_len,))
        # +2 reserves ids beyond the vocabulary (e.g. padding / OOV).
        embedding_layer = Embedding(vocabulary_size + 2,
                                    self.config.embedding_dim,
                                    weights=[weights],
                                    mask_zero=True)(input_seqs)
        bilstm_layer = Bidirectional(LSTM(128))(embedding_layer)
        dense_layer = Dense(128, activation='relu')(bilstm_layer)
        seqs_vector = Dense(128, activation='relu')(dense_layer)
        return Model(input_seqs, seqs_vector)

    def create_base_network_word(self):
        """Word-level BiLSTM encoder.

        Bug fix: the input length is now max_len_word — the original used
        config.max_len, which mismatched the max_len_word-shaped word inputs
        wired up in build_model.
        """
        return self._create_bilstm_base_network(self.config.max_len_word,
                                                len(self.vocabulary_word),
                                                self.weights_word)

    def create_base_network_char(self):
        """Char-level BiLSTM encoder (input length fixed to max_len_char; the
        original used config.max_len, mismatching the char inputs)."""
        return self._create_bilstm_base_network(self.config.max_len_char,
                                                len(self.vocabulary_char),
                                                self.weights_char)

    def build_model(self):
        """Create and compile the classifier (graph built once in the
        uncompiled builder, instead of being duplicated here)."""
        self.model = self.build_model_without_compile()
        self.model.compile(loss='binary_crossentropy',
                           optimizer=self.config.optimizer,
                           metrics=['acc'])

    def build_model_without_compile(self):
        """Return the uncompiled classifier over (word, char) input pairs."""
        input_a_word = Input(shape=(self.config.max_len_word,))
        input_b_word = Input(shape=(self.config.max_len_word,))
        input_a_char = Input(shape=(self.config.max_len_char,))
        input_b_char = Input(shape=(self.config.max_len_char,))
        base_network_word = self.create_base_network_word()
        base_network_char = self.create_base_network_char()
        vector_a_word = base_network_word(input_a_word)
        vector_b_word = base_network_word(input_b_word)
        vector_a_char = base_network_char(input_a_char)
        vector_b_char = base_network_char(input_b_char)
        # normalize=True makes each dot product a cosine-similarity feature.
        vector_dot_word = Dot(axes=1, normalize=True)([vector_a_word, vector_b_word])
        vector_dot_char = Dot(axes=1, normalize=True)([vector_a_char, vector_b_char])
        vector_merged = concatenate(inputs=[vector_a_word, vector_b_word, vector_a_char, vector_b_char,
                                            vector_dot_word, vector_dot_char])
        dense_layer_1 = Dense(128, activation='relu')(vector_merged)
        dense_layer_2 = Dense(64, activation='relu')(dense_layer_1)
        output_layer = Dense(1, activation='sigmoid')(dense_layer_2)
        return Model([input_a_word, input_b_word, input_a_char, input_b_char], output_layer)


class EnsembleModel(BaseModel):
    """Stacking ensemble: wires pre-built word- and char-level sub-models to
    shared inputs and learns a small dense head over their concatenated outputs.
    """

    def __init__(self, config, vocabulary_word, vocabulary_char, word_models=None, char_models=None):
        super(EnsembleModel, self).__init__(config)
        self.vocabulary_word = vocabulary_word
        self.vocabulary_char = vocabulary_char
        # Pre-trained 100-dim embeddings for each granularity.
        self.weights_word = np.load(os.path.join(self.config.embedding_path_word, 'atec_task1_100_dim.embeddings'))
        self.weights_char = np.load(os.path.join(self.config.embedding_path_char, 'atec_task1_100_dim.embeddings'))
        self.word_models = word_models
        self.char_models = char_models
        self.build_model()

    def build_model(self):
        """Connect every sub-model to the shared inputs and compile the head."""
        word_a = Input(shape=(self.config.max_len_word,))
        word_b = Input(shape=(self.config.max_len_word,))
        char_a = Input(shape=(self.config.max_len_char,))
        char_b = Input(shape=(self.config.max_len_char,))

        # Each sub-model scores the appropriate (a, b) granularity pair.
        sub_outputs = [m([word_a, word_b]) for m in (self.word_models or [])]
        sub_outputs += [m([char_a, char_b]) for m in (self.char_models or [])]

        merged = concatenate(inputs=sub_outputs)
        hidden = Dense(6)(merged)
        prediction = Dense(1, activation='sigmoid')(hidden)

        self.model = Model([word_a, word_b, char_a, char_b], prediction)
        self.model.compile(loss='binary_crossentropy',
                           optimizer=self.config.optimizer,
                           metrics=['acc'])


class BiLSTMAttSiameseModel(BaseModel):
    """Siamese BiLSTM with cross-sentence soft attention (the alignment /
    enhancement / composition structure resembles ESIM), trained with
    contrastive loss on a euclidean distance head.
    """

    def __init__(self, config, vocabulary):
        super(BiLSTMAttSiameseModel, self).__init__(config)
        self.vocabulary = vocabulary
        # Pre-trained 100-dim embeddings; rows must align with vocabulary indices.
        self.weights = np.load(os.path.join(self.config.embedding_path, 'atec_task1_100_dim.embeddings'))
        self.max_len = config.max_len
        self.build_model()

    def create_base_network(self):
        """Shared encoder: embedding -> dropout -> BiLSTM returning the full
        hidden-state sequence (needed by the attention step in build_model).
        """
        input_seqs = Input(shape=(self.max_len,))
        # mask_zero must stay False: the Dot/Permute attention below does not
        # support masking.
        embedding_layer = Embedding(len(self.vocabulary)+2,
                                    self.config.embedding_dim,
                                    weights=[self.weights],
                                    mask_zero=False)(input_seqs)
        embedding_layer = Dropout(0.2)(embedding_layer)
        bilstm_layer = Bidirectional(LSTM(128, return_sequences=True, recurrent_regularizer=l2(0.01)))(embedding_layer)

        return Model(input_seqs, bilstm_layer)

    def build_model(self):
        """Create and compile the full matching model."""
        input_a = Input(shape=(self.config.max_len,))
        input_b = Input(shape=(self.config.max_len,))
        base_network = self.create_base_network()
        vector_a = base_network(input_a)
        vector_b = base_network(input_b)

        # Alignment matrix: pairwise dot products between every timestep of
        # sentence A and every timestep of sentence B.
        attention = Dot(axes=-1)([vector_a, vector_b])

        # Softmax over each axis yields attention weights in the two directions.
        wb = Lambda(lambda x: softmax(x, axis=1), output_shape=lambda x: x)(attention)
        wa = Permute((2, 1))(Lambda(lambda x: softmax(x, axis=2), output_shape=lambda x: x)(attention))
        # Soft-aligned counterparts: each sentence's timesteps re-expressed as
        # weighted sums of the other sentence's hidden states.
        sent1_ = Dot(axes=1)([wa, vector_b])
        sent2_ = Dot(axes=1)([wb, vector_a])

        # Enhancement features: difference (via negate + add) and elementwise product.
        neg = Lambda(lambda x: -x, output_shape=lambda x: x)
        substract1 = Add()([vector_a, neg(sent1_)])
        mutiply1 = Multiply()([vector_a, sent1_])
        substract2 = Add()([vector_b, neg(sent2_)])
        mutiply2 = Multiply()([vector_b, sent2_])

        m_sent1 = concatenate([vector_a, sent1_, substract1, mutiply1], axis=-1)
        m_sent2 = concatenate([vector_b, sent2_, substract2, mutiply2], axis=-1)

        # m_sent1 = TimeDistributed(Dense(128, activation='relu'))(m_sent1)
        # m_sent2 = TimeDistributed(Dense(128, activation='relu'))(m_sent2)

        # Composition BiLSTM, shared by both sentences.
        compose = Bidirectional(LSTM(128, return_sequences=True))
        v_sent1 = compose(m_sent1)
        v_sent2 = compose(m_sent2)

        # Pool each composed sequence into a fixed-size vector (avg + max).
        sent1_maxpool = GlobalMaxPool1D()(v_sent1)
        sent2_maxpool = GlobalMaxPool1D()(v_sent2)
        sent1_avgpool = GlobalAvgPool1D()(v_sent1)
        sent2_avgpool = GlobalAvgPool1D()(v_sent2)
        sent1 = concatenate([sent1_avgpool, sent1_maxpool], axis=-1)
        sent2 = concatenate([sent2_avgpool, sent2_maxpool], axis=-1)

        distance = Lambda(euclidean_distance, output_shape=eucl_dist_output_shape)([sent1, sent2])
        # distance = Dot(axes=1, normalize=True)([sent1, sent2])
        # distance = Merge(mode=lambda x: exponent_neg_manhattan_distance([x[0], x[1]]),
        #                  output_shape=lambda x: (x[0][0], 1))([sent1, sent2])
        self.model = Model([input_a, input_b], distance)
        self.model.compile(loss=contrastive_loss,
                           optimizer=self.config.optimizer,
                           metrics=[acc])