import logging

import tensorflow.keras.backend as K
import math
from tensorflow.keras import layers
from tensorflow.keras import regularizers
from tensorflow.keras.layers import Input, GRU
from tensorflow.keras.layers import Conv2D,Bidirectional,LSTM
from tensorflow.keras.layers import Lambda, Dense, RepeatVector
from tensorflow.keras.layers import Reshape
from tensorflow.keras.layers import BatchNormalization,MaxPooling2D,concatenate,Activation,AveragePooling2D
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2
import tensorflow as tf
import kaldi_tflite as ktf
import numpy as np

from constants import *
tf.random.set_seed(1234)  # fix the global TF RNG seed so weight initialization is reproducible
def clipped_relu(inputs):
    """Apply a ReLU capped at 20, wrapped in a Lambda layer for functional-API use."""
    def _clip(t):
        return K.minimum(K.maximum(t, 0), 20)

    return Lambda(_clip)(inputs)

def attention_pooling(inputs, **kwargs):
    """Attention-weighted pooling over axis 1.

    Expects ``inputs = [out, att]``; clips the attention map away from 0/1 for
    numerical stability, normalizes it along axis 1, and returns the weighted
    sum of ``out`` over that axis.
    """
    out, att = inputs
    eps = 1e-7
    safe_att = K.clip(att, eps, 1. - eps)
    weights = safe_att / K.sum(safe_att, axis=1)[:, None, :]
    return K.sum(out * weights, axis=1)

def pooling_shape(input_shape):
    """Output-shape helper for attention_pooling: drop the time axis.

    Accepts either a single (batch, time, freq) shape tuple or a list of such
    tuples (in which case the first entry is used) and returns (batch, freq).
    """
    shape = input_shape[0] if isinstance(input_shape, list) else input_shape
    sample_num, _time_steps, freq_bins = shape
    return (sample_num, freq_bins)

def multi_scale_model(input_shape=(NUM_FRAMES, 64, 1)):
    """Multi-scale CNN + BiLSTM speaker-embedding model.

    Args:
        input_shape: (time_frames, freq_bins, channels) of the input spectrogram.

    Returns:
        A Keras Model mapping the input to an L2-normalized 512-d embedding.
    """

    def MSB(filter_num):
        """Multi-scale block: parallel 1x1, dilated 3x3, dilated 5x5 and 5x3 convs."""
        def f(x):
            params = {
                'strides': 1,
                'padding': 'same',
                'kernel_regularizer': l2(5e-4)
            }
            x1 = Conv2D(filters=filter_num, kernel_size=(1, 1), **params)(x)

            x_ = Conv2D(filters=filter_num, kernel_size=(1, 1), **params)(x)
            x2 = Conv2D(filters=filter_num, dilation_rate=[1, 2], kernel_size=(3, 3), **params)(x_)

            x_ = Conv2D(filters=filter_num, kernel_size=(1, 1), **params)(x)
            x3 = Conv2D(filters=filter_num, dilation_rate=[1, 2], kernel_size=(5, 5), **params)(x_)

            x_ = Conv2D(filters=filter_num, kernel_size=(1, 1), **params)(x)
            x4 = Conv2D(filters=filter_num, kernel_size=(5, 3), **params)(x_)
            x = concatenate([x1, x2, x3, x4])
            x = clipped_relu(x)
            return x

        return f

    def MSB_mini(filter_num):
        """Lighter multi-scale block without the 1x1 bottleneck convs."""
        def f(x):
            params = {
                'strides': 1,
                'padding': 'same',
                'kernel_regularizer': l2(5e-4)
            }
            x2 = Conv2D(filters=filter_num, dilation_rate=[1, 2], kernel_size=(3, 3), **params)(x)
            x3 = Conv2D(filters=filter_num, dilation_rate=[1, 2], kernel_size=(5, 5), **params)(x)
            x4 = Conv2D(filters=filter_num, kernel_size=(3, 5), **params)(x)
            x = concatenate([x2, x3, x4])
            x = clipped_relu(x)
            return x

        return f

    input_tensor1 = Input(shape=input_shape)
    # Stem conv, then a stack of multi-scale blocks with one 2x2 max-pool.
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='same', activation=None)(input_tensor1)
    x = clipped_relu(x)
    x = MSB(16)(x)
    x = MSB(32)(x)
    x = MSB(32)(x)
    x = MaxPooling2D(pool_size=[2, 2])(x)
    x = MSB_mini(32)(x)
    x = MSB_mini(32)(x)
    # Average over axis 1 (the time axis, given input_shape=(NUM_FRAMES, ...)),
    # then summarize the remaining sequence with a bidirectional LSTM.
    x = Lambda(lambda y: K.mean(y, axis=1), name='average2')(x)
    x = Bidirectional(LSTM(32))(x)
    x = Dense(512)(x)
    output1 = Lambda(lambda y: K.l2_normalize(y, axis=1), name='ln')(x)
    # Use the imported Model class, consistent with the other builders in this file
    # (tf.keras.Model is the same class, so behavior is unchanged).
    model = Model(input_tensor1, output1)

    return model



def identity_block(input_tensor, kernel_size, filters, stage, block):
    """Two-conv residual identity block with a clipped-ReLU activation.

    Both convs keep the spatial size (stride 1, 'same' padding) and the channel
    count, so the input can be added back to the branch output unchanged.
    """
    conv_name_base = 'res{}_{}_branch'.format(stage, block)

    def _conv_bn(tensor, suffix):
        # Conv2D followed by BatchNorm, named '<base><suffix>' / '<base><suffix>_bn'.
        tensor = Conv2D(filters,
                        kernel_size=kernel_size,
                        strides=1,
                        activation=None,
                        padding='same',
                        kernel_initializer='glorot_uniform',
                        kernel_regularizer=regularizers.l2(l=0.00001),
                        name=conv_name_base + suffix)(tensor)
        return BatchNormalization(name=conv_name_base + suffix + '_bn')(tensor)

    branch = clipped_relu(_conv_bn(input_tensor, '_2a'))
    branch = _conv_bn(branch, '_2b')

    # Residual shortcut: add the block input back before the final activation.
    merged = layers.add([branch, input_tensor])
    return clipped_relu(merged)

def identity_block2(input_tensor, kernel_size, filters, stage, block):   # next step try full-pre activation
    """Three-conv (1x1 -> kxk -> 1x1) residual identity block.

    All convs use the same filter count, stride 1 and 'same' padding, so the
    shortcut addition needs no projection.
    """
    conv_name_base = 'res{}_{}_branch'.format(stage, block)

    def _conv_bn(tensor, ks, conv_suffix, bn_suffix):
        # Conv2D + BatchNorm pair with explicit layer-name suffixes.
        tensor = Conv2D(filters,
                        kernel_size=ks,
                        strides=1,
                        activation=None,
                        padding='same',
                        kernel_initializer='glorot_uniform',
                        kernel_regularizer=regularizers.l2(l=0.00001),
                        name=conv_name_base + conv_suffix)(tensor)
        return BatchNormalization(name=conv_name_base + bn_suffix)(tensor)

    branch = clipped_relu(_conv_bn(input_tensor, 1, '_conv1_1', '_conv1.1_bn'))
    branch = clipped_relu(_conv_bn(branch, kernel_size, '_conv3', '_conv3_bn'))
    branch = _conv_bn(branch, 1, '_conv1_2', '_conv1.2_bn')

    # Residual shortcut, then the final clipped activation.
    merged = layers.add([branch, input_tensor])
    return clipped_relu(merged)


def convolutional_model(input_shape=(NUM_FRAMES, 64, 1),
                        batch_size=BATCH_SIZE * TRIPLET_PER_BATCH, num_frames=NUM_FRAMES):
    """ResNet-style convolutional speaker-embedding model.

    Four stride-2 conv stages (64/128/256/512 filters), each followed by three
    residual identity blocks; features are averaged over time and projected to
    an L2-normalized 512-d embedding.

    Args:
        input_shape: (time_frames, freq_bins, channels) of the input features.
        batch_size: kept for backward compatibility; not used by the graph.
        num_frames: number of input time frames, used to size the post-CNN reshape.

    Returns:
        A Keras Model named 'convolutional'.
    """

    def conv_and_res_block(inp, filters, stage):
        """One stride-2 downsampling conv + BN + clipped ReLU, then 3 identity blocks."""
        conv_name = 'conv{}-s'.format(filters)
        o = Conv2D(filters,
                   kernel_size=5,
                   strides=2,
                   padding='same',
                   kernel_initializer='glorot_uniform',
                   kernel_regularizer=regularizers.l2(l=0.00001), name=conv_name)(inp)
        o = BatchNormalization(name=conv_name + '_bn')(o)
        o = clipped_relu(o)
        for i in range(3):
            o = identity_block(o, kernel_size=3, filters=filters, stage=stage, block=i)
        return o

    def cnn_component(inp):
        x_ = conv_and_res_block(inp, 64, stage=1)
        x_ = conv_and_res_block(x_, 128, stage=2)
        x_ = conv_and_res_block(x_, 256, stage=3)
        x_ = conv_and_res_block(x_, 512, stage=4)
        return x_

    inputs = Input(shape=input_shape)
    x = cnn_component(inputs)  # shape = (batch, ceil(num_frames/16), ceil(freq_bins/16), 512)
    # Four stride-2 'same' convs divide each spatial dim by 16 (ceiling division).
    # Flatten freq x channels per time step; previously hard-coded to 2048, now
    # derived from input_shape so other freq-bin counts also work
    # (default 64 bins -> ceil(64/16) * 512 = 2048, unchanged).
    feature_dim = math.ceil(input_shape[1] / 16) * 512
    x = Lambda(lambda y: K.reshape(y, (-1, math.ceil(num_frames / 16), feature_dim)), name='reshape')(x)
    x = Lambda(lambda y: K.mean(y, axis=1), name='average')(x)  # average over time
    x = Dense(512, name='affine')(x)
    x = Lambda(lambda y: K.l2_normalize(y, axis=1), name='ln')(x)

    model = Model(inputs, x, name='convolutional')

    return model



def convolutional_model_simple(input_shape=(NUM_FRAMES, 64, 1),
                        batch_size=BATCH_SIZE * TRIPLET_PER_BATCH, num_frames=NUM_FRAMES):
    """Smaller ResNet-style speaker-embedding model (3 stages, identity_block2).

    Like convolutional_model but with only three stride-2 stages (64/128/256
    filters) and the bottleneck-style identity_block2 residual blocks.

    Args:
        input_shape: (time_frames, freq_bins, channels) of the input features.
        batch_size: kept for backward compatibility; not used by the graph.
        num_frames: number of input time frames, used to size the post-CNN reshape.

    Returns:
        A Keras Model named 'convolutional'.
    """

    def conv_and_res_block(inp, filters, stage):
        """One stride-2 downsampling conv + BN + clipped ReLU, then 3 identity_block2s."""
        conv_name = 'conv{}-s'.format(filters)
        o = Conv2D(filters,
                   kernel_size=5,
                   strides=2,
                   padding='same',
                   kernel_initializer='glorot_uniform',
                   kernel_regularizer=regularizers.l2(l=0.00001), name=conv_name)(inp)
        o = BatchNormalization(name=conv_name + '_bn')(o)
        o = clipped_relu(o)
        for i in range(3):
            o = identity_block2(o, kernel_size=3, filters=filters, stage=stage, block=i)
        return o

    def cnn_component(inp):
        x_ = conv_and_res_block(inp, 64, stage=1)
        x_ = conv_and_res_block(x_, 128, stage=2)
        x_ = conv_and_res_block(x_, 256, stage=3)
        return x_

    inputs = Input(shape=input_shape)
    x = cnn_component(inputs)  # shape = (batch, ceil(num_frames/8), ceil(freq_bins/8), 256)
    # Three stride-2 'same' convs divide each spatial dim by 8 (ceiling division).
    # Flatten freq x channels per time step; previously hard-coded to 2048, now
    # derived from input_shape (default 64 bins -> ceil(64/8) * 256 = 2048, unchanged).
    feature_dim = math.ceil(input_shape[1] / 8) * 256
    x = Lambda(lambda y: K.reshape(y, (-1, math.ceil(num_frames / 8), feature_dim)), name='reshape')(x)
    x = Lambda(lambda y: K.mean(y, axis=1), name='average')(x)  # average over time
    x = Dense(512, name='affine')(x)
    x = Lambda(lambda y: K.l2_normalize(y, axis=1), name='ln')(x)

    model = Model(inputs, x, name='convolutional')

    return model

def recurrent_model(input_shape=(NUM_FRAMES, 64, 1),
                    batch_size=BATCH_SIZE * TRIPLET_PER_BATCH, num_frames=NUM_FRAMES):
    """Conv front-end + 3-layer GRU speaker-embedding model.

    A single stride-2 conv, then three stacked GRUs over the time axis; the GRU
    outputs are averaged over time and projected to an L2-normalized 512-d
    embedding.

    Args:
        input_shape: (time_frames, freq_bins, channels) of the input features.
        batch_size: kept for backward compatibility; not used by the graph.
        num_frames: number of input time frames, used to size the reshape.

    Returns:
        A Keras Model named 'recurrent'.
    """
    inputs = Input(shape=input_shape)
    x = Conv2D(64, kernel_size=5, strides=2, padding='same', kernel_initializer='glorot_uniform', kernel_regularizer=regularizers.l2(l=0.0001))(inputs)
    x = BatchNormalization()(x)  # shape = (batch, ceil(num_frames/2), ceil(freq_bins/2), 64)
    x = clipped_relu(x)
    # The stride-2 'same' conv halves time and freq (ceiling division). Flatten
    # freq x channels per time step; previously hard-coded to 2048, now derived
    # from input_shape (default 64 bins -> ceil(64/2) * 64 = 2048, unchanged).
    feature_dim = math.ceil(input_shape[1] / 2) * 64
    x = Lambda(lambda y: K.reshape(y, (-1, math.ceil(num_frames / 2), feature_dim)), name='reshape')(x)
    x = GRU(1024, return_sequences=True)(x)  # shape = (batch, ceil(num_frames/2), 1024)
    x = GRU(1024, return_sequences=True)(x)
    x = GRU(1024, return_sequences=True)(x)
    x = Lambda(lambda y: K.mean(y, axis=1), name='average')(x)  # average over time
    x = Dense(512)(x)
    x = Lambda(lambda y: K.l2_normalize(y, axis=1), name='ln')(x)

    model = Model(inputs, x, name='recurrent')

    return model


# x-vector model definition. Input = MFCC frames with 30 MFCC coefficients.


# Model weights are loaded from a Kaldi nnet3 file. For x-vector models,
# only the TDNN and BatchNorm layers need to be initialized; we match
# the Keras layers with components in the nnet3 model by name.



if __name__ == '__main__':
    # Smoke test: build the multi-scale model and print its architecture.
    model = multi_scale_model()
    print(model.summary())
