import math

from tensorflow.keras.layers import Conv1D, AveragePooling1D, Flatten, Dense, Input
from tensorflow.keras import Model, layers
from keras.models import Sequential, load_model, Model
from keras.layers import Dense, Dropout, Activation, Flatten, LocallyConnected1D, Reshape, Input, Multiply, Permute, \
    RepeatVector, Lambda, CuDNNLSTM, BatchNormalization
from keras.layers import Conv1D, Conv2D, AveragePooling1D, MaxPooling1D, MaxPooling2D, Concatenate, Add, ZeroPadding1D
from keras import regularizers, constraints
from keras import backend as K
import tensorflow as tf


def get_triplet_model(input_shape, output_shape=32, embedding=False):
    """Build a small 1D-CNN suitable for triplet / classification training.

    Two Conv1D + AveragePooling1D stages feed a flattened Dense head.

    Args:
        input_shape: 2-tuple describing one sample, e.g. (timesteps, channels).
        output_shape: width of the final Dense layer (embedding dimension or
            number of classes).
        embedding: if True the head is a linear Dense layer (an embedding);
            otherwise a softmax classifier.

    Returns:
        An uncompiled Keras ``Model``.

    Raises:
        ValueError: if ``input_shape`` does not have exactly 2 dimensions.
    """
    # ValueError is the idiomatic error for a bad argument value; it is a
    # subclass of Exception, so existing `except Exception` callers still work.
    if len(input_shape) != 2:
        raise ValueError(
            "input_shape must have exactly 2 dimensions, got %r" % (input_shape,))
    input_layer = Input(shape=input_shape)
    # Convolutional feature extractor.
    conv1 = Conv1D(filters=64, kernel_size=15, padding="same", activation="selu")(input_layer)
    pool1 = AveragePooling1D(pool_size=15, strides=15)(conv1)
    conv2 = Conv1D(filters=128, kernel_size=3, padding="same", activation="selu")(pool1)
    pool2 = AveragePooling1D(pool_size=2, strides=2)(conv2)
    flatten = Flatten(name='Flatten')(pool2)
    # Head: linear embedding or softmax classification, per `embedding`.
    if embedding:
        output_layer = Dense(output_shape)(flatten)
    else:
        output_layer = Dense(output_shape, activation="softmax")(flatten)
    return Model(inputs=input_layer, outputs=output_layer)


def get_vgg16_model(input_shape, output_shape=32, embedding=False):
    """Build a compact VGG-style 1D CNN (two conv/pool blocks + Dense head).

    Prints the model summary as a side effect, as the original did.

    Args:
        input_shape: 2-tuple describing one sample, e.g. (timesteps, channels).
        output_shape: width of the final Dense layer.
        embedding: if True the head is a linear Dense layer named 'embedding';
            otherwise a softmax classifier named 'predictions'.

    Returns:
        An uncompiled Keras ``Model``.

    Raises:
        Exception: if ``input_shape`` does not have exactly 2 dimensions.
    """
    if len(input_shape) != 2:
        raise Exception("Input shape must be 2 for Model")
    img_input = Input(shape=input_shape)
    x = Conv1D(64, 11, activation='relu', padding='same', name='block1_conv1')(img_input)
    x = AveragePooling1D(2, strides=2, name='block1_pool')(x)

    x = Conv1D(128, 11, activation='relu', padding='same', name='block2_conv1')(x)
    x = AveragePooling1D(2, strides=2, name='block2_pool')(x)

    x = Flatten(name='flatten')(x)
    if embedding:
        x = Dense(output_shape, name='embedding')(x)
    else:
        x = Dense(output_shape, activation='softmax', name='predictions')(x)
    # Fix: the original constructed the Model twice — once only to print its
    # summary and a second, distinct object to return. Build it once.
    model = Model(img_input, x)
    model.summary()
    return model


def get_cnn_pay_attention_model(input_shape, units=128, output_shape=32, embedding=False):
    """Bidirectional CuDNNLSTM model with per-direction soft attention.

    Runs two independent CuDNNLSTM passes over the input — one forward, one
    backward — computes an attention-weighted sum over time for each
    direction, concatenates the two context vectors, and feeds them to a
    Dense head.

    Args:
        input_shape: shape tuple of one sample, e.g. (timesteps, channels).
        units: hidden size of each CuDNNLSTM (and of each context vector).
        output_shape: width of the final Dense layer.
        embedding: if True the head is Dense + BatchNormalization (no
            softmax); otherwise a softmax classifier head.

    Returns:
        An uncompiled Keras ``Model``.

    NOTE(review): CuDNNLSTM requires a CUDA-capable GPU at run time —
    confirm deployment targets before using this builder.
    """
    _input = Input(shape=(input_shape))

    # Local = LocallyConnected1D(filters=1, kernel_size=52, strides=26, padding='valid', activation=None,
    #                            use_bias=True,
    #                            kernel_regularizer=regularizers.l2(1e-3),
    #                            bias_regularizer=regularizers.l2(1e-3)
    #                            )(_input)

    # Local = Reshape((-1, 2))(Local)
    Local = _input
    # Forward-direction LSTM; full sequence output (timesteps, units).
    FW_LSTM_out = CuDNNLSTM(units, return_sequences=True
                            #                         recurrent_regularizer=regularizers.l2(1e-5),
                            #                         kernel_regularizer=regularizers.l2(1e-5),
                            #                         bias_regularizer=regularizers.l2(1e-5)
                            #                         recurrent_constraint = constraints.UnitNorm(axis=0)
                            )(Local)

    # Backward-direction LSTM: go_backwards reverses the read order.
    BW_LSTM_out = CuDNNLSTM(units, return_sequences=True, go_backwards=True
                            #                         recurrent_regularizer=regularizers.l2(1e-5),
                            #                         kernel_regularizer=regularizers.l2(1e-5),
                            #                         bias_regularizer=regularizers.l2(1e-5)
                            #                         recurrent_constraint = constraints.UnitNorm(axis=0)
                            )(Local)

    # go_backwards also emits outputs in reversed time order; flip them back
    # along the time axis (-2) so FW and BW outputs are time-aligned.
    BW_LSTM_out = Lambda(lambda xin: K.reverse(xin, axes=-2))(BW_LSTM_out)

    # BN + tanh is applied only to the copies used for attention scoring;
    # the weighted sum below multiplies against the *pre-activation* BN output.
    FW_LSTM_out_BN = BatchNormalization()(FW_LSTM_out)
    FW_LSTM_out_BN_act = Activation('tanh')(FW_LSTM_out_BN)

    BW_LSTM_out_BN = BatchNormalization()(BW_LSTM_out)
    BW_LSTM_out_BN_act = Activation('tanh')(BW_LSTM_out_BN)

    # Forward attention: one score per timestep, softmax-normalized over time.
    FW_attention = Dense(1, use_bias=False)(FW_LSTM_out_BN_act)
    FW_attention = Flatten()(FW_attention)
    FW_attention = BatchNormalization()(FW_attention)
    FW_attention = Activation('softmax', name='FW_attention')(FW_attention)

    # Broadcast the (timesteps,) weight vector across all `units` channels:
    # RepeatVector gives (units, timesteps), Permute flips to (timesteps, units).
    FW_attention = RepeatVector(units)(FW_attention)
    FW_attention = Permute([2, 1])(FW_attention)

    # Attention-weighted sum over the time axis -> (units,) context vector.
    FW_sent_representation = Multiply()([FW_LSTM_out_BN, FW_attention])
    FW_sent_representation = Lambda(lambda xin: K.sum(xin, axis=-2), output_shape=(units,))(FW_sent_representation)
    FW_sent_representation = Dropout(0.5)(FW_sent_representation)

    # Backward attention: identical structure, applied to the BW stream.
    BW_attention = Dense(1, use_bias=False)(BW_LSTM_out_BN_act)
    BW_attention = Flatten()(BW_attention)
    BW_attention = BatchNormalization()(BW_attention)
    BW_attention = Activation('softmax', name='BW_attention')(BW_attention)

    BW_attention = RepeatVector(units)(BW_attention)
    BW_attention = Permute([2, 1])(BW_attention)

    BW_sent_representation = Multiply()([BW_LSTM_out_BN, BW_attention])
    BW_sent_representation = Lambda(lambda xin: K.sum(xin, axis=-2), output_shape=(units,))(BW_sent_representation)
    BW_sent_representation = Dropout(0.5)(BW_sent_representation)

    # Concatenate forward and backward context vectors -> (2 * units,).
    FB_represent = Concatenate()([FW_sent_representation, BW_sent_representation])

    if embedding:
        output_probabilities = Dense(output_shape)(FB_represent)
        output_probabilities = BatchNormalization()(output_probabilities)
    else:
        output_probabilities = Dense(output_shape)(FB_represent)
        output_probabilities = BatchNormalization()(output_probabilities)
        output_probabilities = Activation('softmax')(output_probabilities)

    return Model(inputs=_input, outputs=output_probabilities)


def get_vgg16_model1(input_shape, output_shape=32, embedding=False):
    """VGG-style 1D CNN with a soft-attention gate after the first conv block.

    Args:
        input_shape: 2-tuple describing one sample, e.g. (timesteps, channels).
        output_shape: width of the final Dense layer.
        embedding: if True the head is a linear Dense layer named 'embedding';
            otherwise a softmax classifier named 'predictions'.

    Returns:
        An uncompiled Keras ``Model``.

    Raises:
        Exception: if ``input_shape`` does not have exactly 2 dimensions.
    """
    if len(input_shape) != 2:
        raise Exception("Input shape must be 2 for Model")
    net_in = Input(shape=input_shape)
    features = Conv1D(128, 11, activation='relu', padding='same', name='block1_conv1')(net_in)

    # Attention gate: score each timestep, softmax-normalize over time,
    # then broadcast the weights across all 128 channels.
    scores = Dense(1, use_bias=False)(features)
    scores = Flatten()(scores)
    scores = BatchNormalization()(scores)
    weights = Activation('softmax')(scores)
    weights = RepeatVector(128)(weights)
    weights = Permute([2, 1])(weights)

    gated = Multiply()([features, weights])
    gated = AveragePooling1D(2, strides=2, name='block1_pool')(gated)

    gated = Conv1D(128, 11, activation='relu', padding='same', name='block2_conv1')(gated)
    gated = AveragePooling1D(2, strides=2, name='block2_pool')(gated)

    # Fully connected head.
    vec = Flatten(name='flatten1')(gated)
    vec = Dense(256, activation='relu', name='fc1')(vec)
    vec = Dense(256, activation='relu', name='fc2')(vec)
    if embedding:
        head = Dense(output_shape, name='embedding')(vec)
    else:
        head = Dense(output_shape, activation='softmax', name='predictions')(vec)
    return Model(net_in, head)


def get_mlp_model(input_shape, output_shape=32, embedding=False):
    """Build a plain MLP: three hidden Dense layers of 100 ReLU units.

    Args:
        input_shape: shape tuple of one sample.
        output_shape: width of the final Dense layer.
        embedding: if True the head is a linear Dense layer named 'embedding';
            otherwise a softmax classifier named 'predictions'.

    Returns:
        An uncompiled Keras ``Model``.
    """
    # Renamed from `input` — the original shadowed the builtin of that name.
    inputs = Input(shape=input_shape)
    x = Dense(100, activation='relu')(inputs)
    x = Dense(100, activation='relu')(x)
    x = Dense(100, activation='relu')(x)
    if embedding:
        x = Dense(output_shape, name='embedding')(x)
    else:
        x = Dense(output_shape, activation='softmax', name='predictions')(x)
    return Model(inputs, x)


if __name__ == '__main__':
    # Smoke test: build the attention model and print its layer summary.
    demo_model = get_cnn_pay_attention_model(
        input_shape=(300, 1), output_shape=32, embedding=False)
    demo_model.summary()