# coding=utf-8
import keras
from keras.layers import Input, Conv2D, BatchNormalization, MaxPooling2D
from keras.layers import Reshape, Dense, Dropout, Lambda
from keras.optimizers import Adam
from keras import backend as K
from keras.models import Model
from keras.utils import multi_gpu_model
import tensorflow as tf
from model_speech.AMmodel.layers.multihead_attention import MultiHeadAttention
from model_speech.AMmodel.layers.switchnorm import SwitchNormalization
from model_speech.AMmodel.wav_model import WavePickModel
from model_speech.AMmodel.ctc_wrap_cfm import CtcModel
from model_speech.utils.tools import merge_two_last_dims
from model_speech.utils.text_featurizers import TextFeaturizer
from model_speech.AMmodel.transducer_wrap_cfm import Transducer
from model_speech.AMmodel.las_wrap_cfm import LAS, LASConfig
from model_speech.utils.speech_featurizers import SpeechFeaturizer
import argparse
from model_speech.utils.user_config import UserConfig


def am_hparams():
    """Return the default hyper-parameters for the acoustic model.

    Replaces the old ``tf.contrib.training.HParams`` container (removed in
    TF2) — and the previous per-call throwaway class — with a plain
    attribute namespace carrying the same fields.

    Returns:
        types.SimpleNamespace with:
            vocab_size: output vocabulary size (``None`` until set by caller).
            lr: initial Adam learning rate.
            gpu_nums: number of GPUs to train with.
            is_training: whether the model is built for training.
    """
    from types import SimpleNamespace  # local import keeps module-level deps unchanged
    return SimpleNamespace(
        vocab_size=None,
        lr=0.0008,
        gpu_nums=1,
        is_training=True,
    )


# ============================= Build the model =============================
class Am:
    """Legacy acoustic-model wrapper (Keras functional style).

    NOTE(review): this class appears half-migrated to the Conformer code
    below — ``_model_init`` only builds an encoder, while ``opt_init``
    compiles ``self.model_ConformerCTC``, which is never assigned anywhere
    in this file, so ``Am(args)`` with ``is_training=True`` raises
    AttributeError. Confirm intent before relying on it.
    """

    def __init__(self, args):
        # `args` carries the hyper-parameters produced by am_hparams().
        self.vocab_size = args.vocab_size
        self.gpu_nums = args.gpu_nums
        self.lr = args.lr
        self.is_training = args.is_training
        # NOTE(review): the `args` parameter is shadowed by the argparse
        # result below, so CLI flags win over anything passed in.
        parse = argparse.ArgumentParser()
        parse.add_argument('--data_config', type=str, default='./configs/am_data.yml', help='the am data configs path')
        parse.add_argument('--model_config', type=str, default='./configs/conformerL.yml',
                           help='the am model configs path')
        args = parse.parse_args()

        config = UserConfig(args.data_config, args.model_config)
        self.learning_config = config['learning_config']
        self.model_config = config['model_config']
        self.speech_config = config['speech_config']
        self.config = config
        self._model_init()
        if self.is_training:
            # self._ctc_init()
            self.opt_init()

    # Build the Conformer encoder; the rest of the graph is commented out.
    def _model_init(self):
        training = True  # NOTE(review): unused local
        ysize = 200
        self.num_classes = 80
        # self.inputs = tf.keras.layers.Input(name='the_inputs', shape=(None, ysize, 1))
        # self.model_config.update({'vocabulary_size': self.num_classes})
        # self.model_config.update({'speech_config': self.speech_config})
        ysize = 200  # NOTE(review): duplicate assignment, same value as above
        self.inputs = Input(name='the_inputs', shape=(None, ysize, 1))
        self.encoder = ConformerEncoder()

        # self.model_ConformerCTC.summary()


        # self.outputs = dense(self.vocab_size, activation='softmax')(self.model_ConformerCTC)
        # self.model = Model(inputs=self.inputs, outputs=self.outputs)
        #
        # self.model.summary()

    # Wire the Keras CTC-loss graph around the model outputs.
    def _ctc_init(self):
        # NOTE(review): relies on self.outputs, which _model_init no longer
        # sets (its dense head is commented out) — would raise AttributeError.
        self.labels = Input(name='the_labels', shape=[None], dtype='float32')
        self.input_length = Input(name='input_length', shape=[1], dtype='int64')
        self.label_length = Input(name='label_length', shape=[1], dtype='int64')
        self.loss_out = Lambda(ctc_lambda, output_shape=(1,), name='ctc') \
            ([self.labels, self.outputs, self.input_length, self.label_length])
        self.ctc_model = Model(inputs=[self.labels, self.inputs,
                                       self.input_length, self.label_length], outputs=self.loss_out)
    #
    def opt_init(self):
        # NOTE(review): epsilon=10e-8 is 1e-7 — 1e-8 was likely intended.
        opt = Adam(lr=self.lr, beta_1=0.9, beta_2=0.999, decay=0.01, epsilon=10e-8)
        if self.gpu_nums > 1:
            self.model_ConformerCTC = multi_gpu_model(self.model_ConformerCTC, gpus=self.gpu_nums)
        self.model_ConformerCTC.compile(loss={'ctc': lambda y_true, output: output}, optimizer=opt)


# ============================ Model components =============================
# def conv2d(size):
#     return Conv2D(size, (3, 3), use_bias=True, activation='relu',
#                   padding='same', kernel_initializer='he_normal')
#
#
# def norm(x):
#     return BatchNormalization(axis=-1)(x)
#
#
# def maxpool(x):
#     return MaxPooling2D(pool_size=(2, 2), strides=None, padding="valid")(x)
#
#
# def dense(units, activation="relu"):
#     return Dense(units, activation=activation, use_bias=True,
#                  kernel_initializer='he_normal')
#
#
# # x.shape=(none, none, none)
# # output.shape = (1/2, 1/2, 1/2)
# def cnn_cell(size, x, pool=True):
#     x = norm(conv2d(size)(x))
#     x = norm(conv2d(size)(x))
#     if pool:
#         x = maxpool(x)
#     return x
#
#
def ctc_lambda(args):
    """Keras ``Lambda`` payload computing the CTC loss.

    Args:
        args: tuple ``(labels, y_pred, input_length, label_length)`` as wired
            up in ``Am._ctc_init``; ``y_pred`` is the (batch, time, classes)
            softmax output.

    Returns:
        Tensor of shape (batch, 1) with the per-sample CTC loss.
    """
    labels, y_pred, input_length, label_length = args
    # The former ``y_pred = y_pred[:, :, :]`` was a full-slice no-op (a
    # leftover of ``y_pred[:, 2:, :]``-style warm-up trimming) — removed.
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)


class GLU(tf.keras.layers.Layer):
    """Gated Linear Unit: splits the input in two along ``axis`` and gates
    the first half with the sigmoid of the second."""

    def __init__(self,
                 axis=-1,
                 name="glu_activation",
                 **kwargs):
        super(GLU, self).__init__(name=name, **kwargs)
        self.axis = axis

    def call(self, inputs, **kwargs):
        values, gate = tf.split(inputs, 2, axis=self.axis)
        return tf.multiply(values, tf.nn.sigmoid(gate))

    def get_config(self):
        config = super(GLU, self).get_config()
        config.update({"axis": self.axis})
        return config


class ConvSubsampling(tf.keras.layers.Layer):
    """Two strided 2-D convolutions that shrink the time axis by
    ``reduction_factor``, then a dense projection of the merged
    feature/channel dims followed by dropout."""

    def __init__(self,
                 odim: int,
                 reduction_factor: int = 4,
                 dropout: float = 0.0,
                 name="conv_subsampling",
                 **kwargs):
        super(ConvSubsampling, self).__init__(name=name, **kwargs)
        assert reduction_factor % 2 == 0, "reduction_factor must be divisible by 2"
        # First conv takes half the time reduction; second conv always halves again.
        self.conv1 = tf.keras.layers.Conv2D(
            filters=odim, kernel_size=(3, 3),
            strides=((reduction_factor // 2), 2),
            padding="same", activation="relu")
        self.conv2 = tf.keras.layers.Conv2D(
            filters=odim, kernel_size=(3, 3),
            strides=(2, 2), padding="same",
            activation="relu")
        self.linear = tf.keras.layers.Dense(odim)
        self.do = tf.keras.layers.Dropout(dropout)

    def call(self, inputs, training=False, **kwargs):
        x = self.conv1(inputs, training=training)
        x = self.conv2(x, training=training)
        # Fold the feature and channel axes together before projecting.
        x = merge_two_last_dims(x)
        x = self.linear(x, training=training)
        return self.do(x, training=training)

    def get_config(self):
        conf = super(ConvSubsampling, self).get_config()
        for sub in (self.conv1, self.conv2, self.linear, self.do):
            conf.update(sub.get_config())
        return conf


class FFModule(tf.keras.layers.Layer):
    """Conformer feed-forward module.

    LayerNorm -> Dense(4*d) -> swish -> dropout -> Dense(d) -> dropout,
    scaled by ``fc_factor`` (half-step residual) and added to the input.
    """

    def __init__(self,
                 input_dim,
                 dropout=0.0,
                 fc_factor=0.5,
                 name="ff_module",
                 **kwargs):
        super(FFModule, self).__init__(name=name, **kwargs)
        self.fc_factor = fc_factor
        self.ln = tf.keras.layers.LayerNormalization()
        self.ffn1 = tf.keras.layers.Dense(4 * input_dim)
        self.swish = tf.keras.layers.Activation(
            tf.keras.activations.swish, name="swish_activation")
        self.do1 = tf.keras.layers.Dropout(dropout)
        self.ffn2 = tf.keras.layers.Dense(input_dim)
        self.do2 = tf.keras.layers.Dropout(dropout)
        self.res_add = tf.keras.layers.Add()

    def call(self, inputs, training=False, **kwargs):
        x = self.ln(inputs, training=training)
        x = self.do1(self.swish(self.ffn1(x, training=training)), training=training)
        x = self.do2(self.ffn2(x, training=training), training=training)
        # Half-step residual as in the Conformer macaron design.
        return self.res_add([inputs, self.fc_factor * x])

    def get_config(self):
        conf = super(FFModule, self).get_config()
        conf.update({"fc_factor": self.fc_factor})
        for sub in (self.ln, self.ffn1, self.swish, self.do1,
                    self.ffn2, self.do2, self.res_add):
            conf.update(sub.get_config())
        return conf


class MHSAModule(tf.keras.layers.Layer):
    """Conformer multi-head self-attention module: pre-LayerNorm,
    attention, dropout, and a residual connection."""

    def __init__(self,
                 head_size,
                 num_heads,
                 dropout=0.0,
                 name="mhsa_module",
                 **kwargs):
        super(MHSAModule, self).__init__(name=name, **kwargs)
        self.ln = tf.keras.layers.LayerNormalization()
        self.mha = MultiHeadAttention(head_size=head_size, num_heads=num_heads)
        self.do = tf.keras.layers.Dropout(dropout)
        self.res_add = tf.keras.layers.Add()

    def call(self, inputs, training=False, **kwargs):
        normed = self.ln(inputs, training=training)
        # Self-attention: query, key and value are all the normalized input.
        attended = self.mha([normed, normed, normed], training=training)
        return self.res_add([inputs, self.do(attended, training=training)])

    def get_config(self):
        conf = super(MHSAModule, self).get_config()
        for sub in (self.ln, self.mha, self.do, self.res_add):
            conf.update(sub.get_config())
        return conf


class ConvModule(tf.keras.layers.Layer):
    """Conformer convolution module.

    LN -> pointwise conv (2*d) -> GLU -> separable conv -> SwitchNorm ->
    swish -> pointwise conv (d) -> dropout, plus a residual connection.
    """

    def __init__(self,
                 input_dim,
                 kernel_size=32,
                 dropout=0.0,
                 name="conv_module",
                 **kwargs):
        super(ConvModule, self).__init__(name=name, **kwargs)
        self.ln = tf.keras.layers.LayerNormalization()
        self.pw_conv_1 = tf.keras.layers.Conv1D(
            filters=2 * input_dim, kernel_size=1, strides=1,
            padding="same", name="pw_conv_1")
        self.glu = GLU()
        self.dw_conv = tf.keras.layers.SeparableConv1D(
            filters=2 * input_dim, kernel_size=kernel_size, strides=1,
            padding="same", depth_multiplier=1, name="dw_conv")
        # SwitchNormalization in place of the paper's BatchNorm.
        self.bn = SwitchNormalization()
        self.swish = tf.keras.layers.Activation(
            tf.keras.activations.swish, name="swish_activation")
        self.pw_conv_2 = tf.keras.layers.Conv1D(
            filters=input_dim, kernel_size=1, strides=1,
            padding="same", name="pw_conv_2")
        self.do = tf.keras.layers.Dropout(dropout)
        self.res_add = tf.keras.layers.Add()

    def call(self, inputs, training=False, **kwargs):
        x = self.ln(inputs, training=training)
        x = self.glu(self.pw_conv_1(x, training=training))
        x = self.bn(self.dw_conv(x, training=training), training=training)
        x = self.pw_conv_2(self.swish(x), training=training)
        x = self.do(x, training=training)
        return self.res_add([inputs, x])

    def get_config(self):
        conf = super(ConvModule, self).get_config()
        for sub in (self.ln, self.pw_conv_1, self.glu, self.dw_conv, self.bn,
                    self.swish, self.pw_conv_2, self.do, self.res_add):
            conf.update(sub.get_config())
        return conf


class ConformerBlock(tf.keras.layers.Layer):
    """One Conformer block: FF (half-step) -> MHSA -> Conv -> FF (half-step)
    -> final LayerNorm. Each sub-module carries its own residual."""

    def __init__(self,
                 input_dim,
                 dropout=0.0,
                 fc_factor=0.5,
                 head_size=144,
                 num_heads=4,
                 kernel_size=32,
                 name="conformer_block",
                 **kwargs):
        super(ConformerBlock, self).__init__(name=name, **kwargs)
        self.ffm1 = FFModule(input_dim=input_dim,
                             dropout=dropout, fc_factor=fc_factor,
                             name="ff_module_1")
        self.mhsam = MHSAModule(head_size=head_size, num_heads=num_heads,
                                dropout=dropout)
        self.convm = ConvModule(input_dim=input_dim, kernel_size=kernel_size,
                                dropout=dropout)
        self.ffm2 = FFModule(input_dim=input_dim,
                             dropout=dropout, fc_factor=fc_factor,
                             name="ff_module_2")
        self.ln = tf.keras.layers.LayerNormalization()

    def call(self, inputs, training=False, **kwargs):
        x = inputs
        # Macaron-style ordering from the Conformer architecture.
        for sub in (self.ffm1, self.mhsam, self.convm, self.ffm2, self.ln):
            x = sub(x, training=training)
        return x

    def get_config(self):
        conf = super(ConformerBlock, self).get_config()
        for sub in (self.ffm1, self.mhsam, self.convm, self.ffm2, self.ln):
            conf.update(sub.get_config())
        return conf


class ConformerEncoder(tf.keras.Model):
    """Conv subsampling followed by a stack of Conformer blocks.

    ``call`` returns the output of every block (a list), not only the last.
    When ``add_wav_info`` is set, a raw-waveform branch (WavePickModel) is
    summed with the subsampled mel features before the block stack.
    """

    def __init__(self,
                 dmodel=144,
                 reduction_factor=4,
                 num_blocks=16,
                 head_size=36,
                 num_heads=4,
                 kernel_size=32,
                 fc_factor=0.5,
                 dropout=0.0,
                 add_wav_info=False,
                 hop_size=80,
                 name="conformer_encoder",
                 **kwargs):
        super(ConformerEncoder, self).__init__(name=name, **kwargs)
        self.dmodel = dmodel
        self.reduction_factor = reduction_factor
        self.conv_subsampling = ConvSubsampling(
            odim=dmodel, reduction_factor=reduction_factor, dropout=dropout)
        # Attribute creation order is kept as before so Keras layer tracking
        # (and checkpoint weight ordering) is unchanged.
        self.conformer_blocks = []
        self.add_wav_info = add_wav_info
        if self.add_wav_info:
            self.wav_layer = WavePickModel(dmodel, hop_size)
        for idx in range(num_blocks):
            self.conformer_blocks.append(ConformerBlock(
                input_dim=dmodel,
                dropout=dropout,
                fc_factor=fc_factor,
                head_size=head_size,
                num_heads=num_heads,
                kernel_size=kernel_size,
                name=f"conformer_block_{idx}"))

    def call(self, inputs, training=False, **kwargs):
        if self.add_wav_info:
            # inputs is a (mel, wav) pair; fuse the two branches by addition.
            mel_inputs, wav_inputs = inputs
            x = self.conv_subsampling(mel_inputs, training=training) \
                + self.wav_layer(wav_inputs, training=training)
        else:
            x = self.conv_subsampling(inputs, training=training)
        block_outputs = []
        for block in self.conformer_blocks:
            x = block(x, training=training)
            block_outputs.append(x)
        return block_outputs

    def get_config(self):
        conf = super(ConformerEncoder, self).get_config()
        conf.update(self.conv_subsampling.get_config())
        for block in self.conformer_blocks:
            conf.update(block.get_config())
        return conf


class ConformerTransducer(Transducer):
    """RNN-T (Transducer) ASR model built on a Conformer encoder."""

    def __init__(self,
                 dmodel: int,
                 reduction_factor: int,
                 vocabulary_size: int,
                 num_blocks: int = 8,
                 head_size: int = 512,
                 num_heads: int = 4,
                 kernel_size: int = 32,
                 fc_factor: float = 0.5,
                 dropout: float = 0,
                 embed_dim: int = 512,
                 embed_dropout: int = 0,
                 num_lstms: int = 1,
                 lstm_units: int = 512,
                 joint_dim: int = 1024,
                 name: str = "conformer_transducer",
                 speech_config=dict,
                 **kwargs):
        # Samples per feature frame; the wav branch must hop by this times
        # the conv subsampling factor to stay aligned with the mel branch.
        frame_hop = int(speech_config['stride_ms'] * speech_config['sample_rate'] // 1000)
        encoder = ConformerEncoder(
            dmodel=dmodel,
            reduction_factor=reduction_factor,
            num_blocks=num_blocks,
            head_size=head_size,
            num_heads=num_heads,
            kernel_size=kernel_size,
            fc_factor=fc_factor,
            dropout=dropout,
            add_wav_info=speech_config['add_wav_info'],
            hop_size=frame_hop * reduction_factor,
        )
        super(ConformerTransducer, self).__init__(
            encoder=encoder,
            vocabulary_size=vocabulary_size,
            embed_dim=embed_dim,
            embed_dropout=embed_dropout,
            num_lstms=num_lstms,
            lstm_units=lstm_units,
            joint_dim=joint_dim,
            name=name, speech_config=speech_config, **kwargs
        )
        self.time_reduction_factor = reduction_factor


class ConformerCTC(CtcModel):
    """CTC ASR model built on a Conformer encoder."""

    def __init__(self,
                 dmodel: int,
                 reduction_factor: int,
                 vocabulary_size: int,
                 num_blocks: int = 16,
                 head_size: int = 36,
                 num_heads: int = 4,
                 kernel_size: int = 32,
                 fc_factor: float = 0.5,
                 dropout: float = 0,
                 name='conformerCTC',
                 speech_config=dict,
                 **kwargs):
        # Samples per feature frame, scaled by the conv subsampling factor.
        frame_hop = int(speech_config['stride_ms'] * speech_config['sample_rate'] // 1000)
        encoder = ConformerEncoder(
            dmodel=dmodel,
            reduction_factor=reduction_factor,
            num_blocks=num_blocks,
            head_size=head_size,
            num_heads=num_heads,
            kernel_size=kernel_size,
            fc_factor=fc_factor,
            dropout=dropout,
            add_wav_info=speech_config['add_wav_info'],
            hop_size=frame_hop * reduction_factor,
        )
        super(ConformerCTC, self).__init__(
            encoder=encoder, num_classes=vocabulary_size, name=name,
            speech_config=speech_config)
        self.time_reduction_factor = reduction_factor


class ConformerLAS(LAS):
    """Listen-Attend-Spell ASR model built on a Conformer encoder."""

    def __init__(self,
                 config,
                 training=True,
                 enable_tflite_convertible=False,
                 speech_config=dict,
                 ):
        # The LAS decoder must know the encoder's output width.
        config['LAS_decoder'].update({'encoder_dim': config['dmodel']})
        decoder_config = LASConfig(**config['LAS_decoder'])

        # Samples per feature frame, scaled by the conv subsampling factor.
        frame_hop = int(speech_config['stride_ms'] * speech_config['sample_rate'] // 1000)
        encoder = ConformerEncoder(
            dmodel=config['dmodel'],
            reduction_factor=config['reduction_factor'],
            num_blocks=config['num_blocks'],
            head_size=config['head_size'],
            num_heads=config['num_heads'],
            kernel_size=config['kernel_size'],
            fc_factor=config['fc_factor'],
            dropout=config['dropout'],
            name=config['name'],
            add_wav_info=speech_config['add_wav_info'],
            hop_size=frame_hop * config['reduction_factor'],
        )
        super(ConformerLAS, self).__init__(
            encoder=encoder, config=decoder_config, training=training,
            enable_tflite_convertible=enable_tflite_convertible,
            speech_config=speech_config
        )
        self.time_reduction_factor = config['reduction_factor']


class conformer_ctc:
    """Factory/loader for Conformer ASR models (CTC / Transducer / LAS /
    multi-task), driven by a user config dict.

    Builds the text/speech featurizers, instantiates the configured model
    variant, and restores the most recent ``*_<step>.h5`` checkpoint.

    NOTE(review): ``espnet_model`` (referenced in ``load_model``) is not
    defined in this class — presumably provided elsewhere; the ESPNet
    branch raises AttributeError as written here.
    """

    def __init__(self, config):
        # config: full user config with 'speech_config', 'decoder_config',
        # 'model_config' and 'learning_config' sections.
        self.config = config
        self.model_type = None
        self.update_model_type()
        self.speech_config = self.config['speech_config']
        self.text_config = self.config['decoder_config']
        self.model_config = self.config['model_config']
        self.text_feature = TextFeaturizer(self.text_config, True)
        self.speech_feature = SpeechFeaturizer(self.speech_config)
        self.init_steps = None  # training step recovered from checkpoint name

    def update_model_type(self):
        """Derive ``self.model_type`` from the configured model name and
        stamp it into the relevant decoder config section(s)."""
        model_name = self.config['model_config']['name']
        if 'CTC' in model_name and 'Multi' not in model_name:
            self.config['decoder_config'].update({'model_type': 'CTC'})
            self.model_type = 'CTC'
        elif 'Multi' in model_name:
            # Multi-task setups carry one decoder config per output head.
            self.config['decoder1_config'].update({'model_type': 'CTC'})
            self.config['decoder2_config'].update({'model_type': 'CTC'})
            self.config['decoder3_config'].update({'model_type': 'CTC'})
            self.config['decoder_config'].update({'model_type': 'CTC'})
            self.model_type = 'MultiTask'
        elif 'LAS' in model_name:
            self.config['decoder_config'].update({'model_type': 'LAS'})
            self.model_type = 'LAS'
        else:
            self.config['decoder_config'].update({'model_type': 'Transducer'})
            self.model_type = 'Transducer'

    def conformer_model(self, training):
        """Instantiate the Conformer variant named in ``model_config`` and
        store it on ``self.model``."""
        self.model_config.update({'vocabulary_size': self.text_feature.num_classes})
        if self.model_config['name'] == 'ConformerTransducer':
            # The Transducer constructor does not accept the LAS-only keys.
            self.model_config.pop('LAS_decoder')
            self.model_config.pop('enable_tflite_convertible')
            self.model_config.update({'speech_config': self.speech_config})
            self.model = ConformerTransducer(**self.model_config)
        elif self.model_config['name'] == 'ConformerCTC':
            self.model_config.update({'speech_config': self.speech_config})
            self.model = ConformerCTC(**self.model_config)
        elif self.model_config['name'] == 'ConformerLAS':
            self.config['model_config']['LAS_decoder'].update({'n_classes': self.text_feature.num_classes})
            self.config['model_config']['LAS_decoder'].update({'startid': self.text_feature.start})
            self.model = ConformerLAS(self.config['model_config'], training=training,
                                      enable_tflite_convertible=self.config['model_config'][
                                          'enable_tflite_convertible'],
                                      speech_config=self.speech_config)
        else:
            raise Exception('not in supported model list')

    def load_model(self, training=True):
        """Build the configured model, attach featurizers, and restore the
        latest checkpoint.

        Args:
            training: whether the model is being loaded for training; in
                inference mode the model is pre-built and its pb-export
                function is prepared.
        """
        if 'ESPNet' in self.model_config['name']:
            self.espnet_model(training)
        elif 'Conformer' in self.model_config['name']:
            self.conformer_model(training)
        self.model.add_featurizers(self.text_feature)
        f, c = self.speech_feature.compute_feature_dim()

        if not training:
            if self.text_config['model_type'] != 'LAS':
                if self.model.mel_layer is not None:
                    # Raw-waveform front end: build on (batch, samples, 1).
                    self.model._build([3, 16000, 1])
                    self.model.return_pb_function([None, None, 1])
                else:
                    self.model._build([3, 80, f, c])
                    self.model.return_pb_function([None, None, f, c])
            else:
                if self.model.mel_layer is not None:
                    self.model._build([3, 16000, 1], training)
                    self.model.return_pb_function([None, None, 1])
                else:
                    self.model._build([2, 80, f, c], training)
                    self.model.return_pb_function([None, None, f, c])
            self.load_checkpoint(self.config)
        # NOTE(review): this unconditional rebuild assumes the raw-wav input
        # shape and re-runs after the `not training` branch above — confirm.
        self.model._build([1, 16000, 1])
        self.load_checkpoint()

    def load_checkpoint(self, config=None):
        """Restore the newest ``*_<step>.h5`` checkpoint into ``self.model``.

        Args:
            config: optional config dict; defaults to ``self.config``.
                (BUGFIX: this parameter used to be required, so the
                argument-less call in ``load_model`` raised TypeError.)
        """
        import os
        if config is None:
            config = self.config
        self.checkpoint_dir = os.path.join(config['learning_config']['running_config']["outdir"], "checkpoints")
        files = os.listdir(self.checkpoint_dir)
        # Sort numerically on the step suffix so the latest checkpoint wins.
        files.sort(key=lambda x: int(x.split('_')[-1].replace('.h5', '')))
        self.model.load_weights(os.path.join(self.checkpoint_dir, files[-1]))
        self.init_steps = int(files[-1].split('_')[-1].replace('.h5', ''))


if __name__ == '__main__':
    # Cap per-process GPU memory and install the TF1-style session before
    # any Keras graph is constructed.
    sess_config = tf.compat.v1.ConfigProto(allow_soft_placement=True)
    sess_config.gpu_options.per_process_gpu_memory_fraction = 0.9
    tf.compat.v1.keras.backend.set_session(tf.compat.v1.Session(config=sess_config))
    hparams = am_hparams()
    am = Am(hparams)
