# coding=utf-8
import keras
from keras import layers
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, \
    BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
from keras.layers import Reshape, Dense, Dropout, Lambda, Activation

from keras.initializers import glorot_uniform
from keras.optimizers import Adam
from keras import backend as K
from keras.models import Model
from keras.utils import multi_gpu_model
import tensorflow as tf


def am_hparams():
    """Return the default hyper-parameter set for the acoustic model.

    ``vocab_size`` and ``AUDIO_LENGTH`` are deliberately ``None``; the
    caller must fill them in before constructing ``Am``.
    """
    hp = tf.contrib.training.HParams(
        # vocab
        vocab_size=None,     # size of the output symbol vocabulary
        lr=0.0008,           # Adam learning rate
        gpu_nums=1,          # number of GPUs for multi_gpu_model
        AUDIO_LENGTH=None,   # maximum number of input frames
        is_training=True,    # also build/compile the CTC training model
    )
    return hp


# ============================= Model construction ====================================
class Am():
    """DFCNN acoustic model with ResNet blocks and a CTC training head.

    ``self.model`` maps a (AUDIO_LENGTH, 200, 1) spectrogram to per-frame
    softmax scores over ``vocab_size`` symbols.  When ``is_training`` is
    true, ``self.ctc_model`` (the same network wrapped with a CTC loss
    layer) is also built and compiled.
    """

    def __init__(self, args):
        """``args`` is an HParams-like object; see ``am_hparams`` for the fields."""
        self.vocab_size = args.vocab_size
        self.gpu_nums = args.gpu_nums
        self.lr = args.lr
        self.is_training = args.is_training
        self.AUDIO_LENGTH = args.AUDIO_LENGTH
        self.AUDIO_FEATURE_LENGTH = 200  # number of spectral features per frame
        self._model_init()
        if self.is_training:
            self._ctc_init()
            self.opt_init()
            print("模型完成【编译】...")

    def _model_init(self):
        """Build the DFCNN forward model with the Keras functional API.

        Input layer: sequences of 200-dim feature vectors; one utterance
        holds at most AUDIO_LENGTH frames (about 16 s at the original
        1600-frame cap).
        Hidden layers: 3x3 conv + 2x2 max-pool stages, then ResNet
        convolutional/identity blocks, then fully connected layers.
        Output layer: Dense(vocab_size) + softmax, one prediction per
        frame; the CTC loss added in ``_ctc_init`` aligns these frame-wise
        outputs with the label sequence.
        """
        self.inputs = Input(name='the_inputs', shape=(self.AUDIO_LENGTH, self.AUDIO_FEATURE_LENGTH, 1))
        layer_h1 = Conv2D(32, (3, 3), use_bias=False, activation='relu', padding='same',
                          kernel_initializer='he_normal')(self.inputs)  # conv layer
        # BUGFIX: the Keras-1-only `mode=0` argument was removed in Keras 2
        # and made this call raise TypeError; only `axis` is passed now.
        layer_h1 = BatchNormalization(axis=-1)(layer_h1)
        layer_h2 = Conv2D(32, (3, 3), use_bias=True, activation='relu', padding='same', kernel_initializer='he_normal')(
            layer_h1)  # conv layer
        layer_h2 = BatchNormalization(axis=-1)(layer_h2)
        layer_h3 = MaxPooling2D(pool_size=2, strides=None, padding="valid")(layer_h2)  # pooling, 800*100
        layer_h4 = Conv2D(64, (3, 3), use_bias=True, activation='relu', padding='same', kernel_initializer='he_normal')(
            layer_h3)  # conv layer
        layer_h4 = BatchNormalization(axis=-1)(layer_h4)

        # Residual stage 2: one projection block (stride 1) + two identity blocks.
        layer_h5 = convolutional_block(layer_h4, f=3, filters=[64, 64, 256], stage=2, block="a", s=1)
        layer_h5 = identity_block(layer_h5, f=3, filters=[64, 64, 256], stage=2, block="a2")
        layer_h5 = identity_block(layer_h5, f=3, filters=[64, 64, 256], stage=2, block="a3")

        # Residual stage 3: one projection block (stride 2) + three identity blocks.
        layer_h6 = convolutional_block(layer_h5, f=3, filters=[128, 128, 512], stage=3, block="b", s=2)
        layer_h6 = identity_block(layer_h6, f=3, filters=[128, 128, 512], stage=3, block="b1")
        layer_h6 = identity_block(layer_h6, f=3, filters=[128, 128, 512], stage=3, block="b2")
        layer_h6 = identity_block(layer_h6, f=3, filters=[128, 128, 512], stage=3, block="b3")

        layer_h11 = BatchNormalization(axis=-1)(layer_h6)
        # NOTE(review): pool_size=1 max pooling is an identity op; kept so the
        # layer count (and any saved weights) stays compatible.
        layer_h12 = MaxPooling2D(pool_size=1, strides=None, padding="valid")(layer_h11)

        layer_h13 = convolutional_block(layer_h12, f=3, filters=[128, 128, 512], stage=3, block="c", s=2)
        layer_h14 = convolutional_block(layer_h13, f=3, filters=[128, 128, 512], stage=3, block="c1", s=2)
        layer_h15 = MaxPooling2D(pool_size=1, strides=None, padding="valid")(layer_h14)  # identity op, see above
        layer_h15 = MaxPooling2D(pool_size=1, strides=None, padding="valid")(layer_h15)  # identity op, see above
        # Collapse feature/channel axes into one vector per time step.
        # NOTE(review): the target width 200*128/8 == 3200 must divide the
        # per-sample element count for the -1 axis to resolve — verify
        # against the actual AUDIO_LENGTH used by callers.
        layer_h16 = Reshape((-1, int(self.AUDIO_FEATURE_LENGTH * 128 / 8)))(layer_h15)  # reshape layer
        layer_h16 = Dropout(0.3)(layer_h16)  # regularization against overfitting
        layer_h17 = Dense(128, activation="relu", use_bias=True, kernel_initializer='he_normal')(layer_h16)  # FC layer
        layer_h17 = BatchNormalization(axis=-1)(layer_h17)
        layer_h17 = Dropout(0.3)(layer_h17)
        layer_h18 = Dense(self.vocab_size, use_bias=True, kernel_initializer='he_normal')(layer_h17)  # FC layer
        layer_h18 = BatchNormalization(axis=-1)(layer_h18)
        self.outputs = Activation('softmax', name='Activation0')(layer_h18)
        self.model = Model(inputs=self.inputs, outputs=self.outputs)
        self.model.summary()

    def _ctc_init(self):
        """Wrap the forward model with a CTC loss layer into ``self.ctc_model``."""
        self.labels = Input(name='the_labels', shape=[None], dtype='float32')
        self.input_length = Input(name='input_length', shape=[1], dtype='int64')
        self.label_length = Input(name='label_length', shape=[1], dtype='int64')
        self.loss_out = Lambda(ctc_lambda, output_shape=(1,), name='ctc') \
            ([self.labels, self.outputs, self.input_length, self.label_length])
        self.ctc_model = Model(inputs=[self.labels, self.inputs,
                                       self.input_length, self.label_length], outputs=self.loss_out)

    def opt_init(self):
        """Compile ``self.ctc_model`` with Adam.

        The 'ctc' Lambda layer already emits the loss value, so the compile
        loss simply passes the layer output through.
        """
        # NOTE(review): epsilon=10e-8 equals 1e-7, not 1e-8 — possibly a
        # typo, kept as-is to preserve training behavior.
        opt = Adam(lr=self.lr, beta_1=0.9, beta_2=0.999, decay=0.01, epsilon=10e-8)
        if self.gpu_nums > 1:
            self.ctc_model = multi_gpu_model(self.ctc_model, gpus=self.gpu_nums)
        self.ctc_model.compile(loss={'ctc': lambda y_true, output: output}, optimizer=opt)


# ============================ Model components =================================
def conv2d(size):
    """Return a 3x3, same-padding, ReLU Conv2D layer with ``size`` filters."""
    layer = Conv2D(
        size,
        (3, 3),
        use_bias=True,
        activation='relu',
        padding='same',
        kernel_initializer='he_normal',
    )
    return layer


def norm(x):
    """Apply batch normalization over the channel axis of tensor ``x``."""
    bn = BatchNormalization(axis=-1)
    return bn(x)


def maxpool(x):
    """Downsample ``x`` with 2x2 valid max pooling (halves both spatial dims)."""
    pool = MaxPooling2D(pool_size=(2, 2), strides=None, padding="valid")
    return pool(x)


def dense(units, activation="relu"):
    """Return a fully connected layer with ``units`` outputs."""
    layer = Dense(
        units,
        activation=activation,
        use_bias=True,
        kernel_initializer='he_normal',
    )
    return layer


# cnn_cell: two conv+BN stages; with pool=True the trailing 2x2 max
# pooling halves each spatial dimension of the output.
def cnn_cell(size, x, pool=True):
    """Stack two conv+batch-norm stages, optionally followed by 2x2 pooling.

    Each stage is a fresh ``conv2d(size)`` layer whose output is batch
    normalized; when ``pool`` is true the result is max-pooled once.
    """
    out = x
    for _ in range(2):
        out = norm(conv2d(size)(out))
    if pool:
        out = maxpool(out)
    return out


def ctc_lambda(args):
    """Compute the CTC batch cost; used as the body of the 'ctc' Lambda layer.

    ``args`` packs four tensors:
    labels -- ground-truth label sequences
    y_pred -- per-frame softmax predictions from the network
    input_length -- length of each prediction sequence
    label_length -- length of each label sequence

    Returns the CTC loss tensor (one value per batch element).
    """
    labels, y_pred, input_length, label_length = args
    # The original `y_pred = y_pred[:, :, :]` was a no-op slice and has been
    # removed.  (Some CTC recipes drop leading frames with `y_pred[:, 2:, :]`;
    # this model never did, so behavior is unchanged.)
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)


# Convolutional residual block — convolutional_block
def convolutional_block(X, f, filters, stage, block, s=2):
    """ResNet convolutional block with a projection shortcut.

    param :
    X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    f -- int, window size of the middle CONV on the main path (f=3 in ResNet)
    filters -- list of ints, the filter counts of the main-path CONV layers
    stage -- int, names the layers by their position in the network
    block -- string/char, names the layers by their position in the network
    s -- int, stride used by the first CONV and by the shortcut CONV
    return:
    X -- output of the block, shape (n_H, n_W, n_C)
    """
    # Base names for the layers of this block.
    conv_prefix = "res{0}{1}_branch".format(stage, block)
    bn_prefix = "bn{0}{1}_branch".format(stage, block)

    # Unpack the per-stage filter counts.
    f1, f2, f3 = filters

    # Keep the raw input for the (projected) shortcut path.
    shortcut = X

    # Main path, stage 2a: strided 1x1 conv + BN + ReLU.
    x = Conv2D(filters=f1, kernel_size=(1, 1), strides=(s, s), padding="valid",
               name=conv_prefix + "2a", kernel_initializer=glorot_uniform(seed=0))(X)
    x = BatchNormalization(axis=3, name=bn_prefix + "2a")(x)
    x = Activation("relu")(x)

    # Main path, stage 2b: fxf same-padding conv + BN + ReLU.
    x = Conv2D(filters=f2, kernel_size=(f, f), strides=(1, 1), padding="same",
               name=conv_prefix + "2b", kernel_initializer=glorot_uniform(seed=0))(x)
    x = BatchNormalization(axis=3, name=bn_prefix + "2b")(x)
    x = Activation("relu")(x)

    # Main path, stage 2c: 1x1 conv + BN, no activation before the merge.
    x = Conv2D(filters=f3, kernel_size=(1, 1), strides=(1, 1), padding="valid",
               name=conv_prefix + "2c", kernel_initializer=glorot_uniform(seed=0))(x)
    x = BatchNormalization(axis=3, name=bn_prefix + "2c")(x)

    # Shortcut path: strided 1x1 conv + BN to match the main-path shape.
    shortcut = Conv2D(filters=f3, kernel_size=(1, 1), strides=(s, s), padding="valid",
                      name=conv_prefix + "1", kernel_initializer=glorot_uniform(seed=0))(shortcut)
    shortcut = BatchNormalization(axis=3, name=bn_prefix + "1")(shortcut)

    # Merge the two paths and apply the final ReLU.
    return Activation("relu")(layers.add([x, shortcut]))


# Identity residual block — identity_block
def identity_block(X, f, filters, stage, block):
    """Three-layer ResNet identity block (shortcut with no projection).

    param :
    X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    f -- int, window size of the middle CONV on the main path
    filters -- list of ints, the filter counts of the main-path CONV layers
    stage -- int, names the layers by their position in the network
    block -- string/char, names the layers by their position in the network
    return:
    X -- output of the block, shape (n_H, n_W, n_C)
    """
    # Base names for the layers of this block.
    conv_prefix = "res{0}{1}_branch".format(stage, block)
    bn_prefix = "bn{0}{1}_branch".format(stage, block)

    # Unpack the per-stage filter counts.
    f1, f2, f3 = filters

    # Main path: 1x1 -> fxf -> 1x1, all stride 1 so the shape is preserved
    # and the raw input can be added back directly.  The last stage skips
    # the ReLU: activation happens after the merge.
    stages = [
        (f1, (1, 1), "valid", "2a", True),
        (f2, (f, f), "same", "2b", True),
        (f3, (1, 1), "valid", "2c", False),
    ]
    x = X
    for n_filters, kernel, pad, suffix, relu in stages:
        x = Conv2D(filters=n_filters, kernel_size=kernel, strides=(1, 1), padding=pad,
                   name=conv_prefix + suffix, kernel_initializer=glorot_uniform(seed=0))(x)
        x = BatchNormalization(axis=3, name=bn_prefix + suffix)(x)
        if relu:
            x = Activation("relu")(x)

    # Add the untouched input back onto the main path and activate.
    return Activation("relu")(layers.add([x, X]))
