from keras.layers import Activation, Dense, Input, merge
from keras.layers.convolutional import Conv3D, UpSampling3D
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import GlobalAveragePooling3D, MaxPooling3D
from keras.models import Model
from keras.regularizers import l2
# from keras.utils.visualize_util import plot
from layers import decoder_loss
import numpy as np
import os

# Module-level L2 weight-decay coefficient: assigned in ResnetBuilder.build()
# and read by the layer-factory helpers below. (`global` at module scope is a
# no-op in Python, but documents the shared-state intent.)
global weight_decay


def _conv_bn_relu(nb_filter, nb_dim1, nb_dim2, nb_dim3, subsample=(1, 1, 1)):
    """Build a closure applying Conv3D -> BatchNorm -> ReLU to its input tensor.

    Post-activation ordering, used only for the network stem; the residual
    blocks use the pre-activation variant `_bn_relu_conv`.
    """
    def layer(x):
        x = Conv3D(nb_filter=nb_filter, kernel_dim1=nb_dim1, kernel_dim2=nb_dim2,
                   kernel_dim3=nb_dim3, subsample=subsample, init="he_normal",
                   border_mode="same", W_regularizer=l2(weight_decay))(x)
        x = BatchNormalization()(x)
        return Activation('relu')(x)

    return layer


def _bn_relu_conv(nb_filter, nb_dim1, nb_dim2, nb_dim3, subsample=(1, 1, 1)):
    """Build a closure applying BatchNorm -> ReLU -> Conv3D (pre-activation).

    This is the ordering from the "Identity Mappings in Deep Residual
    Networks" formulation used inside the residual blocks.
    """
    def layer(x):
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        return Conv3D(nb_filter=nb_filter, kernel_dim1=nb_dim1, kernel_dim2=nb_dim2,
                      kernel_dim3=nb_dim3, subsample=subsample, init="he_normal",
                      border_mode="same", W_regularizer=l2(weight_decay))(x)

    return layer


def _shortcut(input_layer, residual):
    """Sum `input_layer` and `residual`, projecting the input when needed.

    When the spatial dimensions or channel counts differ, the shortcut is a
    strided 1x1x1 convolution that matches the residual's shape; otherwise it
    is the identity.
    """
    in_shape = input_layer._keras_shape
    res_shape = residual._keras_shape
    # Per-axis stride needed to bring the input down to the residual's size.
    strides = tuple(in_shape[axis] // res_shape[axis] for axis in (1, 2, 3))
    same_channels = in_shape[4] == res_shape[4]

    shortcut = input_layer
    if not same_channels or any(s > 1 for s in strides):
        shortcut = Conv3D(nb_filter=res_shape[4], kernel_dim1=1, kernel_dim2=1,
                          kernel_dim3=1, subsample=strides, init="he_normal",
                          border_mode="valid", W_regularizer=l2(weight_decay))(input_layer)

    return merge([shortcut, residual], mode="sum")


def _residual_block(block_function, nb_filters, repetitions):
    """Build a closure stacking `repetitions` residual blocks.

    The first block in the stack downsamples by a factor of 2 in every
    spatial dimension; the rest keep the resolution unchanged.
    """
    def layer(x):
        for idx in range(repetitions):
            subsample = (2, 2, 2) if idx == 0 else (1, 1, 1)
            x = block_function(nb_filters=nb_filters, init_subsample=subsample)(x)
        return x

    return layer


def basic_block(nb_filters, init_subsample=(1, 1, 1)):
    """ResNet basic block: two pre-activation 3x3x3 convolutions + shortcut.

    `init_subsample` strides the first convolution (used for downsampling at
    the start of a stage).
    """
    def layer(x):
        y = _bn_relu_conv(nb_filters, 3, 3, 3, subsample=init_subsample)(x)
        y = _bn_relu_conv(nb_filters, 3, 3, 3)(y)
        return _shortcut(x, y)

    return layer


def bottleneck(nb_filters, init_subsample=(1, 1, 1)):
    """ResNet bottleneck block: 1x1x1 reduce -> 3x3x3 -> 1x1x1 expand (4x) + shortcut.

    `init_subsample` strides the first convolution (used for downsampling at
    the start of a stage).

    Bug fix: the default was `(1, 1)` — a 2-tuple stride for a 3D convolution,
    inconsistent with every other helper in this file. Corrected to a 3-tuple
    so calling the block without an explicit stride is valid.
    """
    def f(input_layer):
        # Channel reduction (optionally strided), spatial conv, then 4x expansion.
        conv_1_1 = _bn_relu_conv(nb_filters, 1, 1, 1, subsample=init_subsample)(input_layer)
        conv_3_3 = _bn_relu_conv(nb_filters, 3, 3, 3)(conv_1_1)
        residual = _bn_relu_conv(nb_filters * 4, 1, 1, 1)(conv_3_3)
        return _shortcut(input_layer, residual)

    return f


def _conv1(input_shape):
    """Stem model: 5x5x5 conv (32 filters, BN+ReLU) followed by a 2x max-pool."""
    stem_input = Input(input_shape)
    features = _conv_bn_relu(nb_filter=32, nb_dim1=5, nb_dim2=5, nb_dim3=5,
                             subsample=(1, 1, 1))(stem_input)
    pooled = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                          border_mode="same")(features)
    return Model(input=stem_input, output=pooled, name='conv1')


def _conv(input_shape, block_fn, repetition, nb_filters, layers):
    """Wrap one residual stage (`repetition` blocks of `block_fn`) as a sub-model.

    `layers` is only used to name the model ('conv2', 'conv3', ...).
    """
    stage_input = Input(input_shape)
    stage_output = _residual_block(block_fn, nb_filters=nb_filters,
                                   repetitions=repetition)(stage_input)
    return Model(input=stage_input, output=stage_output,
                 name='conv{}'.format(layers))


def _decoder(input_shape):
    """Upsampling decoder: five 2x stages mapping encoder features back to a
    single-channel volume in [0, 1].

    Each of the first four stages is UpSampling3D(2) -> two 3x3x3 ReLU convs
    -> BatchNorm, with filter counts 64, 32, 16, 8. The final stage upsamples
    once more and applies two single-filter convs (ReLU then sigmoid) and has
    no batch norm. Refactored from four copy-pasted stages into a loop;
    the layer sequence is unchanged.
    """
    def _conv3x3x3(nb_filter, activation, x):
        # Shared 3x3x3 "same" convolution used by every decoder stage.
        return Conv3D(nb_filter=nb_filter, kernel_dim1=3, kernel_dim2=3, kernel_dim3=3,
                      activation=activation, init="he_normal", border_mode="same",
                      W_regularizer=l2(weight_decay))(x)

    input_layer = Input(input_shape)
    x = input_layer
    for nb_filter in (64, 32, 16, 8):
        x = UpSampling3D((2, 2, 2))(x)
        x = _conv3x3x3(nb_filter, 'relu', x)
        x = _conv3x3x3(nb_filter, 'relu', x)
        x = BatchNormalization()(x)
    # Final stage: upsample to full resolution, squash to one channel in [0, 1].
    x = UpSampling3D((2, 2, 2))(x)
    x = _conv3x3x3(1, 'relu', x)
    x = _conv3x3x3(1, 'sigmoid', x)
    return Model(input=input_layer, output=x, name='deconv')


class ResnetBuilder(object):
    """Factory for 3D ResNets over 64^3 single-channel volumes.

    Produces either a multi-head softmax classifier (`decoder=False`) or an
    encoder-decoder reconstructing the input volume (`decoder=True`).
    """

    @staticmethod
    def build(num_outputs=None, output_names=None, block_fn=basic_block, repetitions=(2, 2, 2, 2), wd=0, decoder=False):
        """Build the model.

        Args:
            num_outputs: per-head class counts (classifier mode only).
            output_names: per-head layer names; must match len(num_outputs).
            block_fn: residual block constructor (basic_block or bottleneck).
            repetitions: number of blocks in each of the four stages.
            wd: L2 weight-decay coefficient applied to all convolutions.
            decoder: if True, attach the upsampling decoder instead of
                classification heads (num_outputs/output_names are ignored).

        Raises:
            Exception: if `repetitions` does not have exactly 4 entries, or if
                classifier outputs are missing/mismatched with their names.
        """
        if len(repetitions) != 4:
            raise Exception("repetitions wrong!")
        # Output heads only exist in classifier mode; previously this check ran
        # unconditionally and `build(decoder=True)` crashed on `len(None)`.
        if not decoder:
            if num_outputs is None or output_names is None or len(num_outputs) != len(output_names):
                raise Exception("all outputs must have names!")
        global weight_decay
        weight_decay = wd

        # Encoder: stem + four residual stages, each halving the resolution.
        model_conv1 = _conv1((64, 64, 64, 1))
        model_conv2 = _conv(model_conv1.output_shape[1:], block_fn, repetitions[0], 16, 2)
        model_conv3 = _conv(model_conv2.output_shape[1:], block_fn, repetitions[1], 32, 3)
        model_conv4 = _conv(model_conv3.output_shape[1:], block_fn, repetitions[2], 64, 4)
        model_conv5 = _conv(model_conv4.output_shape[1:], block_fn, repetitions[3], 128, 5)

        input_layer = Input((64, 64, 64, 1), name='input64')
        conv = model_conv1(input_layer)
        conv = model_conv2(conv)
        conv = model_conv3(conv)
        conv = model_conv4(conv)
        conv = model_conv5(conv)

        if decoder is False:
            # One softmax head per requested output, on globally pooled features.
            pool1 = GlobalAveragePooling3D()(conv)
            output_layer = []
            for i, num_output in enumerate(num_outputs):
                output_layer.append(Dense(output_dim=num_output, init="he_normal", activation="softmax",
                                          W_regularizer=l2(weight_decay), name=output_names[i])(
                    pool1))
        else:
            model_decoder = _decoder(model_conv5.output_shape[1:])
            output_layer = model_decoder(conv)

        model = Model(input=input_layer, output=output_layer)
        return model

    @staticmethod
    def build_resnet_18(num_outputs, output_names, wd=0, decoder=False):
        """ResNet-18: basic blocks, stage depths (2, 2, 2, 2)."""
        return ResnetBuilder.build(num_outputs, output_names, basic_block, (2, 2, 2, 2), wd=wd, decoder=decoder)

    @staticmethod
    def build_resnet_34(num_outputs, output_names, wd=0, decoder=False):
        """ResNet-34: basic blocks, stage depths (3, 4, 6, 3)."""
        return ResnetBuilder.build(num_outputs, output_names, basic_block, (3, 4, 6, 3), wd=wd, decoder=decoder)

    @staticmethod
    def build_resnet_50(num_outputs, output_names, wd=0, decoder=False):
        """ResNet-50: bottleneck blocks, stage depths (3, 4, 6, 3)."""
        return ResnetBuilder.build(num_outputs, output_names, bottleneck, (3, 4, 6, 3), wd=wd, decoder=decoder)

    @staticmethod
    def build_resnet_101(num_outputs, output_names, wd=0, decoder=False):
        """ResNet-101: bottleneck blocks, stage depths (3, 4, 23, 3)."""
        return ResnetBuilder.build(num_outputs, output_names, bottleneck, (3, 4, 23, 3), wd=wd, decoder=decoder)

    @staticmethod
    def build_resnet_152(num_outputs, output_names, wd=0, decoder=False):
        """ResNet-152: bottleneck blocks, stage depths (3, 8, 36, 3)."""
        return ResnetBuilder.build(num_outputs, output_names, bottleneck, (3, 8, 36, 3), wd=wd, decoder=decoder)


def main():
    """Smoke test: build a 3D ResNet-50 classifier on GPU 0 and print it."""
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    # Single 5-class softmax head named 'Mal'.
    model = ResnetBuilder.build_resnet_50((5,), ('Mal',), decoder=False)
    model.compile(loss='categorical_crossentropy', optimizer="adam")
    model.summary()
    # Optional training smoke loop (disabled):
    # for _ in range(10000):
    #     x = np.ones((16, 64, 64, 64, 1)) * np.random.random()
    #     loss = model.train_on_batch(x, x)
    #     print('loss:{}'.format(loss))
    #     # plot(model, to_file='model_resnet3D.png', show_shapes=True)


# Script entry point: build and summarize the model when run directly.
if __name__ == '__main__':
    main()
