import torch
import torch.nn.functional as F


def import_keras():
    """Placeholder hook for lazily importing keras; currently a no-op.

    NOTE(review): callers (e.g. the keras branch of audio2audio_modelling)
    appear to expect this to bring keras names into scope — it does not.
    """
    return None


def import_torch():
    """Placeholder hook for lazily importing torch; currently a no-op.

    torch is already imported at module level, so this stub has no effect.
    """
    return None


def audio2params_modelling(spec_shape, n_params, backend=''):
    """Build a model mapping a spectrogram to ``n_params`` synth parameters.

    Args:
        spec_shape: channels-last input shape, assumed ``(H, W, C)``.
            NOTE(review): the pytorch head's Linear size assumes even H and W
            — confirm with callers.
        n_params: number of output parameters; outputs are sigmoid-squashed
            into [0, 1].
        backend: ``'pytorch'``, ``'tensorflow'``, ``'keras'`` or ``''``
            (empty string defaults to keras).

    Returns:
        A single-element list with the constructed model, or an empty list
        for unimplemented/unknown backends.
    """
    if backend == 'pytorch':
        # (import_torch() call removed: it is an empty stub and torch is
        # already imported at module level.)

        class Network(torch.nn.Module):
            """Conv + pool feature extractor followed by a sigmoid dense head."""

            def __init__(self):
                super(Network, self).__init__()
                # Input is channels-last and transposed to channels-first in
                # forward(), so both in- and out-channels are spec_shape[-1].
                self.conv = torch.nn.Sequential(
                    torch.nn.Conv2d(spec_shape[-1], spec_shape[-1],
                                    kernel_size=(2, 2), stride=1, padding=(1, 1)),
                    torch.nn.ReLU(),
                    torch.nn.AvgPool2d(2, 2),
                )
                # Conv(k=2, s=1, p=1) grows each spatial dim by 1, then
                # AvgPool2d(2) floors it back: (d + 1) // 2 == d // 2 for
                # even d, which matches int(d / 2) below.
                self.dense = torch.nn.Linear(
                    int(spec_shape[0] / 2) * int(spec_shape[1] / 2) * spec_shape[2],
                    n_params)

            def forward(self, x_):
                # (B, H, W, C) -> (B, C, W, H): put channels first for Conv2d.
                x_ = torch.transpose(x_, 1, 3)
                x_ = self.conv(x_)
                # Back to channels-last before flattening.
                x_ = torch.transpose(x_, 1, 3)
                x_ = F.relu(x_)
                # Dropout only active in training mode.
                x_ = F.dropout(x_, training=self.training)
                x_ = x_.reshape(x_.size(0), -1)
                x_ = self.dense(x_)
                # Normalize predicted parameters into [0, 1].
                return torch.sigmoid(x_)

        return [Network()]
    elif backend == 'tensorflow':
        return []
    elif backend == 'keras' or not backend:
        from keras import Input, Model
        from keras.layers import Conv2D, TimeDistributed, LSTM, Flatten, Dense

        layer_in = Input(shape=spec_shape)
        x = Conv2D(8, 2, strides=(2, 2), activation='sigmoid')(layer_in)
        x = TimeDistributed(LSTM(32))(x)
        x = Flatten()(x)
        x = Dense(n_params, activation='sigmoid')(x)

        # Use the already-imported Model (previously a redundant
        # `import keras` + `keras.Model`).
        model_ = Model(layer_in, x)
        # metrics must be a list per the Keras compile() API.
        model_.compile(optimizer='adam', loss='mse', metrics=['acc'])
        return [model_]
    return []


def midi2midi_modelling(input_shape, backend=''):
    """Placeholder for a MIDI-to-MIDI model builder.

    Args:
        input_shape: intended model input shape (currently unused).
        backend: requested backend name (currently unused).

    Returns:
        An empty list — no backend ('pytorch', 'tensorflow', 'keras'/default,
        or unknown) is implemented yet.
    """
    # Every branch of the original scaffold returned [], so the dispatch is
    # collapsed into a single return until a backend is implemented.
    return []


def audio2audio_modelling(spec_shape, latent_dim=2, backend=''):
    """Build a VAE-style audio-to-audio (spectrogram reconstruction) model.

    Args:
        spec_shape: channels-last input spectrogram shape.
        latent_dim: dimensionality of the VAE latent space.
        backend: ``'pytorch'``, ``'tensorflow'``, ``'keras'`` or ``''``
            (empty string defaults to keras).

    Returns:
        ``[vae, decoder]`` for the keras backend, ``''`` for the
        unimplemented pytorch/tensorflow backends, ``[]`` otherwise.
    """
    if backend == 'pytorch':
        return ''
    elif backend == 'tensorflow':
        return ''
    elif backend == 'keras' or not backend:
        # Bug fix: every name below used to be unresolved (import_keras() is
        # an empty stub), so this branch always raised NameError. Import the
        # required symbols locally instead.
        import numpy as np
        from keras import Input, Model
        from keras import backend as K
        from keras.layers import (Conv2D, Conv2DTranspose, Dense, Flatten,
                                  Lambda, Layer, LSTM, Reshape,
                                  TimeDistributed)
        from keras.losses import binary_crossentropy

        # --- Encoder ---------------------------------------------------
        encoder_input = Input(shape=spec_shape)
        x = Conv2D(8, 2, padding='same', activation='relu')(encoder_input)
        x = Conv2D(8, 2, strides=(2, 2), padding='same', activation='relu')(x)
        # Remember the pre-flatten feature-map shape so the decoder can
        # mirror it when un-flattening.
        shape_before_flattening = K.int_shape(x)
        x = TimeDistributed(LSTM(4))(x)

        x = Flatten()(x)
        x = Dense(8, activation='relu')(x)

        z_mean = Dense(latent_dim)(x)
        z_log_var = Dense(latent_dim)(x)

        def sampling(args):
            # Reparameterization trick: z = mu + sigma * eps, eps ~ N(0, 1).
            z_m, z_l_v = args
            epsilon = K.random_normal((K.shape(z_m)[0], latent_dim), 0., 1.)
            return z_m + K.exp(0.5 * z_l_v) * epsilon

        z = Lambda(sampling)([z_mean, z_log_var])

        # --- Decoder ---------------------------------------------------
        decoder_input = Input(K.int_shape(z)[1:])
        x = Dense(np.prod(shape_before_flattening[1:]),
                  activation='relu')(decoder_input)
        x = Reshape(shape_before_flattening[1:])(x)
        x = Conv2DTranspose(8, 2, strides=(2, 2), padding='same',
                            activation='relu')(x)
        # Bug fix: reconstruct the input's channel count. This was
        # Conv2D(latent_dim, ...), which made the flattened reconstruction
        # and input sizes disagree whenever spec_shape[-1] != latent_dim.
        x = Conv2D(spec_shape[-1], 2, padding='same', activation='sigmoid')(x)

        decoder = Model(decoder_input, x)
        z_decoded = decoder(z)

        class CustomLayer(Layer):
            """Attaches the VAE loss (reconstruction + KL) via add_loss."""

            def call(self, inputs):
                x_, z_decoded_, z_m, z_l_v = inputs
                x_ = K.flatten(x_)
                z_decoded_ = K.flatten(z_decoded_)
                x_ent_loss = binary_crossentropy(x_, z_decoded_)
                # Small KL weight (-5e-4) keeps the regularizer gentle.
                kl_loss = -5e-4 * K.mean(
                    1 + z_l_v - K.square(z_m) - K.exp(z_l_v), axis=-1)
                self.add_loss(K.mean(x_ent_loss + kl_loss), inputs=inputs)
                return x_

        y = CustomLayer()([encoder_input, z_decoded, z_mean, z_log_var])

        vae = Model(encoder_input, y)
        # The loss is attached inside CustomLayer, hence loss=None here.
        vae.compile(optimizer='rmsprop', loss=None)
        return [vae, decoder]
    return []


def params2params_modelling(n_params, latent_dim=2, backend=''):
    """Placeholder for a parameter-to-parameter model builder.

    Args:
        n_params: intended parameter count (currently unused).
        latent_dim: intended latent dimensionality (currently unused).
        backend: requested backend name.

    Returns:
        ``''`` for every recognized backend (including the empty-string
        default, which maps to keras), ``[]`` for an unknown backend.
    """
    # Mirrors the original scaffold: recognized (or falsy) backends yield '',
    # anything else falls through to an empty list.
    if backend in ('pytorch', 'tensorflow', 'keras') or not backend:
        return ''
    return []
