from keras.models import Sequential
from keras.layers import Layer, Flatten, Dense, Input, Conv1D, MaxPooling1D, AveragePooling1D,LSTM
from keras.layers import BatchNormalization, Add, Activation, GRU, Reshape, Dropout,Multiply
from keras.layers import LayerNormalization,GlobalMaxPooling1D
from keras.optimizers import RMSprop

def neuralnetwork(func_model):
    """Decorator turning a bare architecture builder into a compiled Keras model factory.

    The wrapped builder ``func_model(input_dim, activation)`` returns an
    uncompiled ``Sequential`` body.  The decorator appends the output layer,
    chooses the loss from the label (midvalue) shape, and compiles with RMSprop.

    The ``attacker`` argument is expected to expose ``param_dict`` (with
    ``attack_method``, ``activation``, ``learning_rate``), ``traces_profile`` /
    ``traces_attack``, ``midvalue_profile`` / ``midvalue_attack`` and
    ``output_class`` — TODO confirm against the attacker class definition.
    """
    from functools import wraps

    @wraps(func_model)  # keep the builder's __name__ for logging/selection
    def new_func_model(attacker):
        attack_method = attacker.param_dict['attack_method']
        # Profiled attacks (DLSCA/COMB) train on the profiling set;
        # otherwise fall back to the attack set.
        if attack_method == 'DLSCA' or attack_method == 'COMB':
            input_dim = attacker.traces_profile.shape[1]
            midvalue_shape = attacker.midvalue_profile.shape
        else:
            input_dim = attacker.traces_attack.shape[1]
            midvalue_shape = attacker.midvalue_attack.shape
        hidden_activation = attacker.param_dict["activation"]
        model = func_model(input_dim, hidden_activation)
        # 2-D midvalues -> per-bit (multi-label) targets; otherwise one-hot classes.
        if len(midvalue_shape) == 2:
            output_activation, loss = "sigmoid", "binary_crossentropy"
        else:
            output_activation, loss = "softmax", "categorical_crossentropy"
        model.add(Dense(attacker.output_class, activation=output_activation))
        optimizer = RMSprop(learning_rate=attacker.param_dict["learning_rate"])
        model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
        return model
    return new_func_model

@neuralnetwork
def DLNW_MLP1(input_dim, activation):
    """MLP with five hidden layers of 200 units each."""
    model = Sequential()
    model.add(Dense(200, input_dim=input_dim, activation=activation))
    for _ in range(4):
        model.add(Dense(200, activation=activation))
    return model


@neuralnetwork
def DLNW_MLP2(input_dim, activation):
    """Tapering MLP: hidden layers of 200, 120, 40 and 40 units."""
    model = Sequential()
    model.add(Dense(200, input_dim=input_dim, activation=activation))
    for units in (120, 40, 40):
        model.add(Dense(units, activation=activation))
    return model


@neuralnetwork
def DLNW_CNN1(input_dim, activation):
    """VGG-style 1-D CNN: five conv + max-pool stages, then two dense layers."""
    model = Sequential()
    model.add(Input(shape=(input_dim, 1)))
    for filters in (64, 128, 256, 512, 512):
        model.add(Conv1D(filters, 11, activation=activation, padding='same'))
        model.add(MaxPooling1D(2, strides=2))
    model.add(Flatten())
    for units in (512, 256):
        model.add(Dense(units, activation=activation))
    return model


@neuralnetwork
def DLNW_CNN2(input_dim, activation):
    """Average-pooled 1-D CNN with a strided first conv and two 4096-unit dense layers."""
    model = Sequential()
    model.add(Input(shape=(input_dim, 1)))
    # First stage downsamples twice: stride-2 conv (valid padding) + pool.
    model.add(Conv1D(64, 11, strides=2, activation=activation))
    model.add(AveragePooling1D(2, strides=2))
    for filters in (128, 256, 512, 512):
        model.add(Conv1D(filters, 11, activation=activation, padding='same'))
        model.add(AveragePooling1D(2, strides=2))
    model.add(Flatten())
    for _ in range(2):
        model.add(Dense(4096, activation=activation))
    return model


class identity_block(Layer):
    """Residual block whose shortcut is the identity: inputs are added back
    to a conv->BN->conv->BN branch, then the activation is applied.

    Fix: ``super().__init__()`` must run *before* sub-layers are assigned as
    attributes, otherwise Keras attribute tracking is not set up yet.
    ``get_config`` is added so models saved with this layer can be reloaded
    via the ``custom_comp`` registry.
    """

    def __init__(self, filters, kernel_size, strides=1, activation="relu", padding="same", **kwargs):
        super().__init__(**kwargs)
        # Kept for get_config so the layer round-trips through serialization.
        self.filters = filters
        self.kernel_size = kernel_size
        self.strides = strides
        self.activation = activation
        self.padding = padding
        self.layer_Conv1D_1 = Conv1D(filters, kernel_size, strides, padding, activation=activation)
        self.layer_Conv1D_2 = Conv1D(filters, kernel_size, strides, padding, activation=activation)
        # NOTE(review): axis=1 normalizes over the time axis; the usual choice
        # for channels_last 1-D data is axis=-1 — confirm this is intentional.
        self.layer_BN_1 = BatchNormalization(axis=1)
        self.layer_BN_2 = BatchNormalization(axis=1)

    def call(self, inputs, *args, **kwargs):
        x = self.layer_Conv1D_1(inputs)
        x = self.layer_BN_1(x)
        x = self.layer_Conv1D_2(x)
        x = self.layer_BN_2(x)
        x = Add()([x, inputs])
        x = Activation(self.activation)(x)
        return x

    def get_config(self):
        config = super().get_config()
        config.update({
            "filters": self.filters,
            "kernel_size": self.kernel_size,
            "strides": self.strides,
            "activation": self.activation,
            "padding": self.padding,
        })
        return config


class convolution_block(Layer):
    """Residual block with a projected shortcut: the main conv->BN->conv->BN
    branch is summed with a conv->BN projection of the inputs (needed when the
    channel count changes), then the activation is applied.

    Fix: ``super().__init__()`` must run *before* sub-layers are assigned as
    attributes, otherwise Keras attribute tracking is not set up yet.
    ``get_config`` is added so models saved with this layer can be reloaded
    via the ``custom_comp`` registry.
    """

    def __init__(self, filters, kernel_size, strides=1, activation="relu", padding="same", **kwargs):
        super().__init__(**kwargs)
        # Kept for get_config so the layer round-trips through serialization.
        self.filters = filters
        self.kernel_size = kernel_size
        self.strides = strides
        self.activation = activation
        self.padding = padding
        self.layer_Conv1D_1 = Conv1D(filters, kernel_size, strides, padding, activation=activation)
        self.layer_Conv1D_2 = Conv1D(filters, kernel_size, strides, padding, activation=activation)
        # Shortcut projection so the skip path matches the branch's channels.
        self.layer_Conv1D_3 = Conv1D(filters, kernel_size, strides, padding, activation=activation)
        # NOTE(review): axis=1 normalizes over the time axis; the usual choice
        # for channels_last 1-D data is axis=-1 — confirm this is intentional.
        self.layer_BN_1 = BatchNormalization(axis=1)
        self.layer_BN_2 = BatchNormalization(axis=1)
        self.layer_BN_3 = BatchNormalization(axis=1)

    def call(self, inputs, *args, **kwargs):
        x = self.layer_Conv1D_1(inputs)
        x = self.layer_BN_1(x)
        x = self.layer_Conv1D_2(x)
        x = self.layer_BN_2(x)
        x_short = self.layer_Conv1D_3(inputs)
        x_short = self.layer_BN_3(x_short)
        x = Add()([x, x_short])
        x = Activation(self.activation)(x)
        return x

    def get_config(self):
        config = super().get_config()
        config.update({
            "filters": self.filters,
            "kernel_size": self.kernel_size,
            "strides": self.strides,
            "activation": self.activation,
            "padding": self.padding,
        })
        return config


@neuralnetwork
def DLNW_RESNET(input_dim, activation):
    """1-D ResNet: a stem conv followed by four residual stages
    (projected block + identity block + average pool), then two dense layers."""
    kernel_size = 9
    model = Sequential()
    model.add(Input(shape=(input_dim, 1)))
    # Stem.
    model.add(Conv1D(64, kernel_size, activation=activation, padding='same'))
    model.add(AveragePooling1D(2, 2))
    # Residual stages with doubling filter counts.
    for filters in (64, 128, 256, 512):
        model.add(convolution_block(filters, kernel_size, activation=activation))
        model.add(identity_block(filters, kernel_size, activation=activation))
        model.add(AveragePooling1D(2, 2))
    model.add(Flatten())
    for _ in range(2):
        model.add(Dense(4096, activation=activation))
    return model


@neuralnetwork
def DLNW_CNNMGU(input_dim, activation):
    """Small conv front-end feeding a single-step GRU head with dropout."""
    return Sequential([
        Input(shape=(input_dim, 1)),
        Conv1D(4, 10, activation=activation, padding='same'),
        BatchNormalization(momentum=0.9),
        AveragePooling1D(2, strides=2),
        Flatten(),
        # Fold the whole feature vector into one time step for the GRU.
        Reshape((1, -1)),
        GRU(256, return_sequences=False),
        Dropout(0.5),
    ])


@neuralnetwork
def DLNW_LSTM(input_dim, activation):
    """Single-step LSTM over the whole trace, followed by one dense layer."""
    return Sequential([
        Input(shape=(input_dim, 1)),
        # Treat the full trace as one time step of width input_dim.
        Reshape((1, -1)),
        LSTM(256, return_sequences=False, activation=activation),
        Dense(512, activation=activation),
    ])


@neuralnetwork
def DLNW_CNNLSTM(input_dim, activation):
    """Conv + pooling front-end feeding a single-step LSTM head with dropout."""
    return Sequential([
        Input(shape=(input_dim, 1)),
        Conv1D(4, 50, activation=activation, padding='same'),
        BatchNormalization(momentum=0.9),
        AveragePooling1D(2, strides=2),
        # Collapse (steps, channels) into one LSTM time step.
        Reshape((1, -1)),
        LSTM(128, return_sequences=False, activation=activation),
        BatchNormalization(momentum=0.9),
        Dropout(0.5),
    ])

class self_attention(Layer):
    """Attention-style layer: a softmax scoring head produces 32 weights,
    the outer product of the raw input with those weights is projected
    through a relu dense layer and layer-normalized."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Attribute names are weight-bearing; keep them stable.
        self.dense1 = Dense(32, activation='softmax')
        self.dense2 = Dense(64, activation='relu')
        self.layernorm = LayerNormalization()

    def call(self, inputs, *args, **kwargs):
        scores = self.dense1(inputs)
        # Outer product per sample: (features, 1) x (1, 32) -> (features, 32).
        column = Reshape((-1, 1))(inputs)
        row = Reshape((1, -1))(scores)
        weighted = Multiply()([column, row])
        projected = self.dense2(weighted)
        return self.layernorm(projected)

@neuralnetwork
def DLNW_SelfAttention(input_dim, activation):
    """Flattened trace through the self_attention layer, regrouped into
    32-wide rows, global-max-pooled and densely projected.

    Fix: removed the stray ``model.summary()`` debug print so this builder is
    silent during construction, consistent with every other DLNW_* builder.
    """
    model = Sequential()
    model.add(Input(shape=(input_dim, 1)))
    model.add(Flatten())
    model.add(self_attention())
    # NOTE(review): self_attention's last dim is 64 (its Dense(64) output),
    # so reshaping to (-1, 32) regroups values across that axis — confirm
    # this regrouping is intentional.
    model.add(Reshape((-1, 32)))
    model.add(GlobalMaxPooling1D())
    model.add(Dense(256, activation=activation))
    model.add(Dropout(0.5))
    return model

custom_comp={'identity_block':identity_block,'convolution_block':convolution_block,'self_attention':self_attention}