import numpy as np
# import tensorflow as tf
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Conv2D, Dense, Flatten, MaxPooling2D, Input, Activation, Reshape, BatchNormalization, UpSampling2D, LeakyReLU, Concatenate, Dropout, AveragePooling2D
# from tensorflow.python.keras.backend import shape
# from tensorflow.keras.layers import merge
# from tensorflow.keras.layers import Embedding, Multiply


def SqueezeNet(nb_classes=10):
    """Build SqueezeNet v1.0 for 224x224x3 inputs.

    The repeated squeeze/expand blocks are factored into a local ``_fire``
    helper; layer names match the original layout (``fire2_squeeze`` ...).

    Args:
        nb_classes: number of output classes (default 10, backward compatible).

    Returns:
        An uncompiled ``Model`` mapping images to softmax class probabilities.
    """
    input_shape = (224, 224, 3)

    def _fire(x, fire_id, squeeze, expand):
        # Fire module: 1x1 squeeze conv, then parallel 1x1 / 3x3 expand convs
        # whose outputs are concatenated along the channel axis.
        s = Conv2D(squeeze, (1, 1), activation='relu', padding='same',
                   name=f'fire{fire_id}_squeeze')(x)
        e1 = Conv2D(expand, (1, 1), activation='relu', padding='same',
                    name=f'fire{fire_id}_expand1')(s)
        e2 = Conv2D(expand, (3, 3), activation='relu', padding='same',
                    name=f'fire{fire_id}_expand2')(s)
        return Concatenate(axis=-1)([e1, e2])

    input_img = Input(shape=input_shape)
    x = Conv2D(96, 7, activation='relu', strides=(2, 2), padding='same', name='conv1')(input_img)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='maxpool1')(x)

    x = _fire(x, 2, squeeze=16, expand=64)
    x = _fire(x, 3, squeeze=16, expand=64)
    x = _fire(x, 4, squeeze=32, expand=128)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='maxpool4')(x)

    x = _fire(x, 5, squeeze=32, expand=128)
    x = _fire(x, 6, squeeze=48, expand=192)
    x = _fire(x, 7, squeeze=48, expand=192)
    x = _fire(x, 8, squeeze=64, expand=256)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='maxpool8')(x)

    x = _fire(x, 9, squeeze=64, expand=256)

    x = Dropout(0.5, name='fire9_dropout')(x)
    # Per-class 1x1 conv; kernel size made explicit (the original passed
    # ``1, 1`` positionally, where the second 1 was the default stride).
    x = Conv2D(nb_classes, (1, 1), padding='valid', name='conv10')(x)
    # For 224x224 inputs the feature map here is 13x13, so this pool acts as
    # global average pooling over the spatial dimensions.
    x = AveragePooling2D((13, 13), name='avgpool10')(x)

    x = Flatten(name='flatten')(x)
    softmax = Activation("softmax", name='softmax')(x)

    return Model(inputs=input_img, outputs=softmax)


def ZFNet():
    """Build a ZFNet-style classifier for 224x224 RGB inputs (10 classes).

    Returns:
        An uncompiled ``Sequential`` model ending in a softmax layer.
    """
    input_shape = (224, 224, 3)
    class_number = 10

    model = Sequential()
    model.add(Input(shape=input_shape, batch_size=1, name="input_1"))
    # The explicit Input layer above already fixes the shape, so the redundant
    # ``input_shape`` kwarg was dropped from the first Conv2D.
    model.add(
        Conv2D(96, (7, 7),
               strides=(2, 2),
               padding='valid',
               activation='relu',
               kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Conv2D(256, (5, 5), strides=(2, 2), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Conv2D(384, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(384, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Flatten())
    # Two dropout-regularised fully connected layers, then the classifier head.
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(class_number, activation='softmax'))
    return model


def AlexNet():
    """Build a reduced AlexNet classifier for 224x224 RGB inputs (10 classes).

    The fully connected layers use 1024 units instead of the paper's 4096.

    Returns:
        An uncompiled ``Sequential`` model ending in a softmax layer.
    """
    class_number = 10
    input_shape = (224, 224, 3)

    model = Sequential()
    model.add(Input(shape=input_shape, batch_size=1, name="input_1"))
    # The explicit Input layer above already fixes the shape, so the redundant
    # ``input_shape`` kwarg was dropped from the first Conv2D.
    model.add(
        Conv2D(96, (11, 11),
               strides=(4, 4),
               padding='valid',
               activation='relu',
               kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Conv2D(256, (5, 5), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Conv2D(384, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(384, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Flatten())
    # Dropout-regularised fully connected layers (1024 units, not 4096).
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(class_number, activation='softmax'))
    return model


def UNet():
    """Build a U-Net for 256x256 single-channel inputs.

    The encoder/decoder's repeated double-conv and up-conv patterns are
    factored into local helpers; the layer sequence is unchanged.

    Returns:
        An uncompiled ``Model`` producing a 256x256x1 sigmoid mask.
    """

    def _double_conv(x, filters):
        # Two 3x3 same-padded ReLU convolutions with he_normal initialisation.
        x = Conv2D(filters, 3, activation='relu', padding='same', kernel_initializer='he_normal')(x)
        return Conv2D(filters, 3, activation='relu', padding='same', kernel_initializer='he_normal')(x)

    def _up_conv(x, filters):
        # 2x upsample followed by a 2x2 convolution ("up-convolution").
        return Conv2D(filters, 2, activation='relu', padding='same',
                      kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(x))

    inputs = Input((256, 256, 1))

    # Contracting path; conv outputs are kept for the skip connections.
    conv1 = _double_conv(inputs, 64)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = _double_conv(pool1, 128)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = _double_conv(pool2, 256)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    drop4 = Dropout(0.5)(_double_conv(pool3, 512))
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    # Bottleneck.
    drop5 = Dropout(0.5)(_double_conv(pool4, 1024))

    # Expanding path: upsample, concatenate the matching skip, double-conv.
    x = _double_conv(Concatenate(axis=3)([drop4, _up_conv(drop5, 512)]), 512)
    x = _double_conv(Concatenate(axis=3)([conv3, _up_conv(x, 256)]), 256)
    x = _double_conv(Concatenate(axis=3)([conv2, _up_conv(x, 128)]), 128)
    x = _double_conv(Concatenate(axis=3)([conv1, _up_conv(x, 64)]), 64)

    x = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(x)
    outputs = Conv2D(1, 1, activation='sigmoid')(x)

    model = Model(inputs=inputs, outputs=outputs)
    return model


def ACGAN():
    """Build the ACGAN generator: 100-dim latent vector -> 28x28x1 image.

    The final tanh activation produces values in [-1, 1]. Prints a layer
    summary as a side effect.

    Returns:
        The uncompiled generator ``Sequential`` model.
    """
    latent_dim = 100
    channels = 1

    model = Sequential()
    model.add(Input(shape=(latent_dim, ), batch_size=1, name="input_1"))
    # The Input layer above already fixes the latent size, so the redundant
    # ``input_dim`` kwarg was dropped from the first Dense layer.
    model.add(Dense(128 * 7 * 7, activation="relu"))
    model.add(Reshape((7, 7, 128)))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())  # 7x7 -> 14x14
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())  # 14x14 -> 28x28
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(channels, kernel_size=3, padding='same'))
    model.add(Activation("tanh"))

    model.summary()

    return model


def BiGAN():
    """Build the BiGAN generator: 100-dim latent vector -> 28x28x1 image.

    Returns:
        The uncompiled generator ``Sequential`` model (tanh output).
    """
    img_shape = (28, 28, 1)
    net = Sequential()
    net.add(Input(shape=(100, ), batch_size=1, name="input_1"))
    # Two identical hidden blocks: Dense -> LeakyReLU -> BatchNorm.
    for width in (512, 512):
        net.add(Dense(width))
        net.add(LeakyReLU(alpha=0.2))
        net.add(BatchNormalization(momentum=0.8))
    # Project to one value per pixel, then fold back into image shape.
    net.add(Dense(np.prod(img_shape), activation='tanh'))
    net.add(Reshape(img_shape))
    return net


def BGAN():
    """Build the BGAN generator and discriminator stacked into one Sequential.

    Latent (100,) -> generated 28x28x1 image (tanh) -> real/fake score
    (sigmoid). The Reshape/Flatten pair in the middle marks the boundary
    between the generator and discriminator halves.

    Returns:
        The uncompiled combined ``Sequential`` model.
    """
    img_shape = (28, 28, 1)
    model_bgan = Sequential()
    model_bgan.add(Input(shape=(100, ), batch_size=1, name="input_1"))
    # Generator half. The Input layer already fixes the latent size, so the
    # redundant ``input_dim`` kwarg was dropped from the first Dense layer.
    model_bgan.add(Dense(256))
    model_bgan.add(LeakyReLU(alpha=0.2))
    model_bgan.add(BatchNormalization(momentum=0.8))
    model_bgan.add(Dense(512))
    model_bgan.add(LeakyReLU(alpha=0.2))
    model_bgan.add(BatchNormalization(momentum=0.8))
    model_bgan.add(Dense(1024))
    model_bgan.add(LeakyReLU(alpha=0.2))
    model_bgan.add(BatchNormalization(momentum=0.8))
    model_bgan.add(Dense(np.prod(img_shape), activation='tanh'))
    model_bgan.add(Reshape(img_shape))
    # Discriminator half. ``input_shape`` on this mid-model Flatten was
    # redundant (the incoming tensor shape is already known) and was removed.
    model_bgan.add(Flatten())
    model_bgan.add(Dense(512))
    model_bgan.add(LeakyReLU(alpha=0.2))
    model_bgan.add(Dense(256))
    model_bgan.add(LeakyReLU(alpha=0.2))
    model_bgan.add(Dense(1, activation='sigmoid'))
    return model_bgan


def LeNet():
    """Build a LeNet-style classifier for 28x28x1 inputs (10 classes).

    Returns:
        An uncompiled ``Sequential`` model ending in a softmax layer.
    """
    net = Sequential()
    net.add(Input(shape=(28, 28, 1), batch_size=1, name="input_1"))
    # Two conv/pool stages with 5x5 kernels.
    for filters in (6, 16):
        net.add(Conv2D(filters, kernel_size=(5, 5), padding='valid', activation='relu'))
        net.add(MaxPooling2D(padding='same'))
    net.add(Flatten())
    # Classic LeNet fully connected head: 120 -> 84 -> 10.
    for units, act in ((120, 'relu'), (84, 'relu'), (10, 'softmax')):
        net.add(Dense(units, activation=act))
    return net


class GAN(Model):
    """Simple generative adversarial network.

    Composes a ``Generator`` and a ``Discriminator``; the forward pass scores
    the generator's output with the discriminator.
    """

    def __init__(self):
        super().__init__()
        self.G = Generator()
        self.D = Discriminator()

    def call(self, x):
        # Generate an image from the latent input, then discriminate it.
        return self.D(self.G(x))


class Discriminator(Model):
    """GAN discriminator producing a real/fake probability.

    Two strided 3x3 convolutions, then a dense layer and a sigmoid output.
    The Reshape to 256*7*7 implies 28x28 inputs (two stride-2 halvings).
    """

    def __init__(self):
        super().__init__()
        self.conv1 = Conv2D(128, kernel_size=(3, 3), strides=(2, 2), padding='same')
        self.relu1 = LeakyReLU(0.2)
        self.conv2 = Conv2D(256, kernel_size=(3, 3), strides=(2, 2), padding='same')
        self.bn2 = BatchNormalization()
        self.relu2 = LeakyReLU(0.2)
        self.reshape = Reshape([256 * 7 * 7])
        self.fc = Dense(1024)
        self.bn3 = BatchNormalization()
        self.relu3 = LeakyReLU(0.2)
        self.out = Dense(1, activation='sigmoid')

    def call(self, x):
        # Thread the input through the layers in their fixed order.
        h = x
        for layer in (self.conv1, self.relu1,
                      self.conv2, self.bn2, self.relu2,
                      self.reshape, self.fc, self.bn3, self.relu3,
                      self.out):
            h = layer(h)
        return h


class Generator(Model):
    """GAN generator mapping a latent vector to a 28x28x1 image.

    Dense projection to a 14x14x256 tensor, one 2x upsample, then three
    convolutions narrowing to a single sigmoid channel.
    """

    def __init__(self, input_dim=100):
        super().__init__()
        self.linear = Dense(256 * 14 * 14)
        self.bn1 = BatchNormalization()
        self.relu1 = Activation('relu')
        self.reshape = Reshape([14, 14, 256])
        self.upsample = UpSampling2D(size=(2, 2))
        self.conv1 = Conv2D(128, kernel_size=(3, 3), padding='same')
        self.bn2 = BatchNormalization()
        self.relu2 = Activation('relu')
        self.conv2 = Conv2D(64, kernel_size=(3, 3), padding='same')
        self.bn3 = BatchNormalization()
        self.relu3 = Activation('relu')
        self.conv3 = Conv2D(1, kernel_size=(1, 1))
        self.out = Activation('sigmoid')

    def call(self, x):
        # Thread the input through the layers in their fixed order.
        h = x
        for layer in (self.linear, self.bn1, self.relu1,
                      self.reshape, self.upsample,
                      self.conv1, self.bn2, self.relu2,
                      self.conv2, self.bn3, self.relu3,
                      self.conv3, self.out):
            h = layer(h)
        return h
