# -*-coding:utf-8-*-
# NOTE: when combining softmax with mixup, the softmax loss function must be
# able to accept soft (non-one-hot) probability targets.

from __future__ import print_function
import sys
import numpy as np
import keras.backend as K
from keras.layers.merge import concatenate
from keras.layers.normalization import BatchNormalization
from keras.applications.inception_v3 import InceptionV3
from keras.layers import Bidirectional, TimeDistributed, Conv2D, MaxPooling2D, Input, GRU, Dense, Activation, Dropout, Reshape, Permute, GlobalAveragePooling2D, SpatialDropout2D, GlobalAveragePooling1D, GlobalMaxPooling1D, Concatenate
from models.modules import conv2d_bn, inception_block
sys.setrecursionlimit(10000)
np.set_printoptions(threshold=sys.maxsize)
if K.image_data_format() == 'channels_first':
    CHANNEL_AXIS = 1
else:
    CHANNEL_AXIS = -1
CURRENT_VERBOSITY = 0


def crnn(spec_x, num_classes, label_level, dropout_rate, filters, pool_size_lists, rnn_lists, dnn_lists):
    """CRNN with frame-level (sequence) output.

    Args:
        spec_x: input spectrogram tensor. The Permute/Reshape below assumes a
            channels_first layout [batch, channels, t_len, f_dim] -- TODO confirm
            against the data pipeline.
        num_classes: number of output classes (one sigmoid unit per class).
        label_level: must be 'frame' -- the model emits one prediction per frame.
        dropout_rate: dropout rate shared by the conv, GRU and DNN stages.
        filters: number of filters for every Conv2D layer.
        pool_size_lists: per-stage frequency pooling sizes (time is never pooled).
        rnn_lists: hidden sizes of the stacked bidirectional GRU layers.
        dnn_lists: widths of the time-distributed dense layers.

    Returns:
        Tensor of per-frame sigmoid activations, shape [batch, t_len, num_classes].
    """
    # Output is a sequence: one multi-label prediction per time frame.
    assert label_level == 'frame'

    for pool_size in pool_size_lists:
        spec_x = Conv2D(filters=filters, kernel_size=(3, 3), padding='same')(spec_x)
        # Fix: normalize over the channel axis derived from the backend's data
        # format (module-level CHANNEL_AXIS) instead of hard-coding axis=1,
        # which is only correct for channels_first.
        spec_x = BatchNormalization(axis=CHANNEL_AXIS)(spec_x)
        spec_x = Activation('relu')(spec_x)
        spec_x = MaxPooling2D(pool_size=(1, pool_size))(spec_x)  # pool only along frequency
        spec_x = Dropout(dropout_rate)(spec_x)

    spec_x = Permute((2, 1, 3))(spec_x)                              # [batch_size, t_len, channels, f_dim]
    out_shape = spec_x._keras_shape
    # Flatten channels x frequency into one feature vector per time frame.
    spec_x = Reshape((-1, out_shape[-1] * out_shape[-2]))(spec_x)

    for units in rnn_lists:
        spec_x = Bidirectional(
            GRU(units, activation='tanh', dropout=dropout_rate, recurrent_dropout=dropout_rate,
                return_sequences=True),
            merge_mode='mul')(spec_x)

    for width in dnn_lists:
        spec_x = TimeDistributed(Dense(width))(spec_x)
        spec_x = Dropout(dropout_rate)(spec_x)

    # Per-frame multi-label output: sigmoid (not softmax).
    spec_x = TimeDistributed(Dense(num_classes))(spec_x)
    output = Activation('sigmoid', name='strong_out')(spec_x)

    return output


# Variant of crnn that also down-samples along the time axis.
def crnn_v2(spec_x, num_classes, label_level, dropout_rate, filters, pool_size_lists, rnn_lists, dnn_lists):
    """CRNN with temporal down-sampling and frame-level (sequence) output.

    Args:
        spec_x: input spectrogram tensor. The Permute/Reshape below assumes a
            channels_first layout [batch, channels, t_len, f_dim] -- TODO confirm.
        num_classes: number of output classes (one sigmoid unit per class).
        label_level: must be 'frame'.
        dropout_rate: dropout rate shared by the conv, GRU and DNN stages.
        filters: number of filters for every Conv2D layer.
        pool_size_lists: UNUSED -- kept only for signature compatibility with
            crnn; the three pooling sizes are fixed below.
        rnn_lists: hidden sizes of the stacked bidirectional GRU layers.
        dnn_lists: widths of the time-distributed dense layers.

    Returns:
        Tensor of per-frame sigmoid activations, [batch, t_len/16, num_classes]
        (time is reduced by 4 * 2 * 2 = 16 by the pooling stages).
    """
    assert label_level == 'frame'

    def _conv_pool(x, pool_size):
        # One conv stage: conv -> BN -> ReLU -> max-pool -> dropout.
        x = Conv2D(filters=filters, kernel_size=(3, 3), padding='same')(x)
        # Fix: use the data-format-dependent channel axis (CHANNEL_AXIS)
        # instead of a hard-coded axis=1 (only valid for channels_first).
        x = BatchNormalization(axis=CHANNEL_AXIS)(x)
        x = Activation('relu')(x)
        x = MaxPooling2D(pool_size=pool_size)(x)
        return Dropout(dropout_rate)(x)

    # Unlike crnn, these stages pool along BOTH time and frequency.
    # (The original comment claiming frequency-only pooling was stale.)
    for pool_size in ((4, 5), (2, 2), (2, 2)):
        spec_x = _conv_pool(spec_x, pool_size)

    spec_x = Permute((2, 1, 3))(spec_x)                              # [batch_size, t_len, channels, f_dim]
    out_shape = spec_x._keras_shape
    # Flatten channels x frequency into one feature vector per time frame.
    spec_x = Reshape((-1, out_shape[-1] * out_shape[-2]))(spec_x)

    for units in rnn_lists:
        spec_x = Bidirectional(
            GRU(units, activation='tanh', dropout=dropout_rate, recurrent_dropout=dropout_rate,
                return_sequences=True),
            merge_mode='mul')(spec_x)

    for width in dnn_lists:
        spec_x = TimeDistributed(Dense(width))(spec_x)
        spec_x = Dropout(dropout_rate)(spec_x)

    # Per-frame multi-label output: sigmoid (not softmax).
    spec_x = TimeDistributed(Dense(num_classes))(spec_x)
    output = Activation('sigmoid', name='strong_out')(spec_x)

    return output


def inception_crnn(spec_x, num_classes, label_level, dropout_rate, filters=64, rnn_lists=None):
    """Inception-CRNN network with frame-level (sequence) output.

    Args:
        spec_x: input spectrogram tensor; comments below assume channels_first
            layout [batch, channels, t_len, f_dim] -- TODO confirm.
        num_classes: number of output classes (one sigmoid unit per class).
        label_level: must be 'frame'.
        dropout_rate: dropout rate used in the stem and the GRU layers.
        filters: number of filters for the two stem conv layers.
        rnn_lists: hidden sizes of the stacked bidirectional GRU layers;
            defaults to [32].

    Returns:
        Tensor of per-frame sigmoid activations, shape [batch, t_len, num_classes].
    """
    # Fix: avoid a mutable default argument ([32]) shared across calls.
    if rnn_lists is None:
        rnn_lists = [32]

    # Output is a sequence: one multi-label prediction per time frame.
    assert label_level == 'frame'

    # Stem Layer
    spec_x = conv2d_bn(spec_x, filters=filters, kernel=[3, 3])
    spec_x = Dropout(dropout_rate)(spec_x)
    spec_x = conv2d_bn(spec_x, filters=filters, kernel=[3, 3])
    spec_x = Dropout(dropout_rate)(spec_x)

    # Inception Layer: pool only along frequency to keep full time resolution.
    # (spec_x: [batch_size, channels, t_len, f_dim])
    for _ in range(3):
        spec_x = inception_block(spec_x)
        spec_x = MaxPooling2D(pool_size=(1, 4))(spec_x)

    # RNN Layer
    spec_x = Permute((2, 1, 3))(spec_x)                     # [batch_size, t_len, channels, f_dim]
    x_shape = spec_x._keras_shape
    # Flatten channels x frequency into one feature vector per time frame.
    spec_x = Reshape((-1, x_shape[-1] * x_shape[-2]))(spec_x)
    for units in rnn_lists:
        spec_x = Bidirectional(
            GRU(units, activation='tanh', dropout=dropout_rate, recurrent_dropout=dropout_rate,
                return_sequences=True),
            merge_mode='mul')(spec_x)

    # Per-frame multi-label output: sigmoid (not softmax).
    # (Removed a duplicated `assert label_level == 'frame'` from the original.)
    spec_x = TimeDistributed(Dense(num_classes))(spec_x)
    output = Activation('sigmoid', name='strong_out')(spec_x)
    return output


def inception(spec_x, num_classes, label_level, dropout_rate, dnn_lists):
    """Inception classifier producing one chunk-level prediction per input.

    Args:
        spec_x: input spectrogram tensor; comments below assume channels_first
            layout [batch, channels, t_len, f_dim] -- TODO confirm.
        num_classes: number of output classes.
        label_level: must be 'chunk' (a single prediction per clip).
        dropout_rate: dropout rate applied after every pooling stage.
        dnn_lists: unused by this architecture (kept for a uniform signature).

    Returns:
        Tensor of softmax class probabilities, shape [batch, num_classes].
    """
    # Single frame output: chunk-level labelling.
    assert label_level == 'chunk'

    # Stem: two conv-BN stages with pooling and dropout.
    for pool_size in ((4, 2), (2, 2)):
        spec_x = conv2d_bn(spec_x, filters=64, kernel=[3, 3])
        spec_x = MaxPooling2D(pool_size=pool_size)(spec_x)
        spec_x = Dropout(dropout_rate)(spec_x)

    # Two inception stages, each followed by pooling and dropout.
    # (spec_x: [batch_size, channels, t_len, f_dim])
    for _ in range(2):
        spec_x = inception_block(spec_x)
        spec_x = MaxPooling2D(pool_size=(2, 4))(spec_x)
        spec_x = Dropout(dropout_rate)(spec_x)

    # Map the feature maps down to num_classes channels, then globally
    # average-pool each map to a single value.
    spec_x = conv2d_bn(spec_x, filters=num_classes, kernel=[3, 3])
    pooled = GlobalAveragePooling2D()(spec_x)

    # Final classification layer.
    return Dense(num_classes, activation='softmax')(pooled)


    # # create the base pre-trained model
    # base_model = InceptionV3(weights='imagenet', include_top=False)
    # # freeze all convolutional InceptionV3 layers
    # for layer in base_model.layers:
    #     layer.trainable = False
    # spec_x = base_model.output


def vgg(spec_x, num_classes, label_level, dropout_rate, dnn_lists):
    """VGG-style classifier producing one chunk-level prediction per input.

    Args:
        spec_x: input spectrogram tensor.
        num_classes: number of output classes.
        label_level: must be 'chunk' (a single prediction per clip).
        dropout_rate: dropout rate applied between conv stages.
        dnn_lists: unused by this architecture (kept for a uniform signature).

    Returns:
        Tensor of softmax class probabilities, shape [batch, num_classes].
    """
    # Single frame output: chunk-level labelling.
    assert label_level == 'chunk'

    # Stage 1: strided 5x5 conv, then 3x3 conv, pool, dropout.
    spec_x = conv2d_bn(spec_x, filters=32, kernel=[5, 5], strides=2)
    spec_x = conv2d_bn(spec_x, filters=32, kernel=[3, 3])
    spec_x = MaxPooling2D(pool_size=(2, 2))(spec_x)
    spec_x = Dropout(dropout_rate)(spec_x)

    # Stage 2: two 64-filter convs, pool, dropout.
    for _ in range(2):
        spec_x = conv2d_bn(spec_x, filters=64, kernel=[3, 3])
    spec_x = MaxPooling2D(pool_size=(2, 2))(spec_x)
    spec_x = Dropout(dropout_rate)(spec_x)

    # Stage 3: widening convs, each followed by dropout, then pool + dropout.
    for width in (128, 128, 256):
        spec_x = conv2d_bn(spec_x, filters=width, kernel=[3, 3])
        spec_x = Dropout(dropout_rate)(spec_x)
    spec_x = conv2d_bn(spec_x, filters=256, kernel=[3, 3])
    spec_x = MaxPooling2D(pool_size=(2, 2))(spec_x)
    spec_x = Dropout(dropout_rate)(spec_x)

    # Stage 4: single 512-filter conv, pool, dropout.
    spec_x = conv2d_bn(spec_x, filters=512, kernel=[3, 3])
    spec_x = MaxPooling2D(pool_size=(2, 2))(spec_x)
    spec_x = Dropout(dropout_rate)(spec_x)

    # Map the feature maps down to num_classes channels, then globally
    # average-pool each map to a single value.
    spec_x = conv2d_bn(spec_x, filters=num_classes, kernel=[3, 3])
    pooled = GlobalAveragePooling2D()(spec_x)

    # Final classification layer.
    return Dense(num_classes, activation='softmax')(pooled)


def crnn_v1(spec_x, num_classes, label_level):
    """CRNN with chunk-level (single prediction per clip) multi-label output.

    Args:
        spec_x: input mel-spectrogram tensor; the Permute/Reshape below assumes
            a channels_first layout [batch, channels, t_len, f_dim] -- TODO confirm.
        num_classes: number of output classes (one sigmoid unit per class).
        label_level: must be 'chunk'.

    Returns:
        Tensor of per-clip sigmoid activations, shape [batch, num_classes].
    """
    # Chunk-level output (the original comment claiming sequence output was stale).
    assert label_level == 'chunk'

    # ---- mel convolution part ----
    block1 = Conv2D(filters=64, kernel_size=(3, 3), padding="same")(spec_x)
    block1 = BatchNormalization()(block1)
    block1 = Activation(activation="relu")(block1)
    block1 = MaxPooling2D(pool_size=(4, 4))(block1)
    block1 = SpatialDropout2D(0.3, data_format=K.image_data_format())(block1)

    block2 = Conv2D(filters=128, kernel_size=(3, 3), padding="same")(block1)
    block2 = BatchNormalization()(block2)
    block2 = Activation(activation="relu")(block2)
    block2 = MaxPooling2D(pool_size=(4, 4))(block2)
    block2 = SpatialDropout2D(0.3, data_format=K.image_data_format())(block2)

    block3 = Conv2D(filters=256, kernel_size=(3, 3), padding="same")(block2)
    block3 = BatchNormalization()(block3)
    block3 = Activation(activation="relu")(block3)
    block3 = MaxPooling2D(pool_size=(2, 4))(block3)
    block3 = SpatialDropout2D(0.3, data_format=K.image_data_format())(block3)   # [batch_size, channels, t_len, f_dim]

    block3 = Permute((2, 1, 3))(block3)                              # [batch_size, t_len, channels, f_dim]
    out_shape = block3._keras_shape
    block3 = Reshape((-1, out_shape[-1] * out_shape[-2]))(block3)    # [batch_size, t_len, channels*f_dim]

    # ---- rnn part ----
    gru = Bidirectional(
        GRU(kernel_initializer='glorot_uniform', activation='tanh', recurrent_dropout=0.1,
            dropout=0.1, units=32, return_sequences=True)
    )(block3)
    gru = Dropout(0.1)(gru)
    # Fix: the second recurrent layer must consume the first GRU's output
    # ('gru'), not 'block3' again -- the original silently discarded the
    # first bidirectional GRU.
    gru = Bidirectional(
        GRU(kernel_initializer='glorot_uniform', activation='tanh', recurrent_dropout=0.1,
            dropout=0.1, units=32, return_sequences=True)
    )(gru)
    gru = Dropout(0.1)(gru)

    dense = TimeDistributed(
        Dense(64, activation="relu"),
    )(gru)
    dense = Dropout(0.1)(dense)
    dense = TimeDistributed(
        Dense(num_classes, activation="sigmoid"),
    )(dense)

    # ---- pooling part ----
    # Collapse the time axis with both average and max pooling, then fuse.
    pool1 = GlobalAveragePooling1D()(dense)
    pool2 = GlobalMaxPooling1D()(dense)
    dense = Concatenate()([pool1, pool2])

    output = Dense(num_classes, activation="sigmoid", name="output")(dense)

    return output
