#!/usr/bin/python3
# -*- coding: utf-8 -*-
# File  : base.py
# Author: anyongjin
# Date  : 2020/10/13
from DistillBert.model import *


class StudentModel(BaseDistilModel):
    def __init__(self, in_pretrain=False, model_dir=None, distill_from_teacher=True, do_quantize=False):
        '''
        Initialize a student model.
        :param in_pretrain: whether in pretrain mode
        :param model_dir: directory to load/save model weights
        :param distill_from_teacher: whether training targets come from a teacher model
        :param do_quantize: whether to build a quantization-aware model
        '''
        self.do_quantize = do_quantize
        # training-phase switches; all start disabled
        self.train_dense = False
        self.fine_tune_base = False  # set True once the base layers have nearly converged
        self.transfer_train = False  # whether to initialize weights from the general base model
        self.train_mode = False
        # output of the knowledge (base) layers, filled in when the graph is built
        self.logits_layer = None
        self.logits = None
        super().__init__(False, in_pretrain, model_dir=model_dir, distill_from_teacher=distill_from_teacher)

    def load_model(self, model_loader, model_dir, step: int = None):
        '''
        Build the student model and load its weights from disk.

        Falls back to the checkpoint with the largest step number when the
        requested file is missing. When ``transfer_train`` is set and this
        student has never been trained, weights are initialized from the
        general base model directory instead.
        :param model_loader: passed through on the recursive transfer-train call
        :param model_dir: directory containing the model weight files
        :param step: checkpoint step to load; None/0 loads 'model.h5'
        '''
        model_name = 'model.h5' if not step else f'model.{step}.h5'
        model_path = os.path.join(model_dir, model_name)
        if not os.path.isfile(model_path):
            # requested checkpoint missing: fall back to the largest-step 'model.<step>.h5'
            names = os.listdir(model_dir)
            max_step = 0
            for name in names:
                arr = name.split('.')
                if len(arr) == 3 and arr[0] == 'model' and arr[-1] == 'h5':
                    try:
                        max_step = max(max_step, int(arr[1]))
                    except ValueError:
                        pass  # middle token is not a step number; skip this file
            if max_step > 0:
                model_path = os.path.join(model_dir, f'model.{max_step}.h5')
                logger.warning(f'specified model not exists, using step model:{model_path}')
        if self.transfer_train:
            # isdir guard: a missing base_model_dir must not crash listdir here
            if not os.path.isfile(model_path) and hasattr(self, 'base_model_dir') \
                    and model_dir != self.base_model_dir \
                    and os.path.isdir(self.base_model_dir) and os.listdir(self.base_model_dir):
                logger.warning("student hasn't been trained ever, initialized from general base model.")
                try:
                    self.load_model(model_loader, self.base_model_dir, step=step)
                    return
                except Exception:
                    # best-effort transfer init: log and fall through to random init
                    logger.error(traceback.format_exc())
                    logger.warning('initialize from general base error, random initializing...')
        self.model = self.create_student_model()
        self.model.summary()
        if os.path.isfile(model_path):
            if self.do_quantize:
                import tensorflow_model_optimization as tfmot
                # quantize_scope is required to deserialize quantize-wrapped layers
                with tfmot.quantization.keras.quantize_scope():
                    self.model.load_weights(model_path, by_name=True)
                    logger.warning(f'student weights loaded from {model_path} in quantized mode')
            else:
                self.model.load_weights(model_path, by_name=True)
                logger.warning(f'student weights loaded from {model_path}')
    def convert_to_quantize_aware_model(self):
        '''
        Rebuild self.model as a quantization-aware model.
        Only dense layer is quantized currently!
        :return:
        '''
        import tensorflow_model_optimization as tfmot
        from tensorflow.keras.utils import CustomObjectScope

        def annotate_dense(layer):
            # mark Dense layers for quantization; leave every other layer untouched
            if isinstance(layer, tf.keras.layers.Dense):
                return tfmot.quantization.keras.quantize_annotate_layer(layer)
            return layer

        # Clone the model so the annotation is applied layer by layer; the custom
        # gelu activation must be resolvable while cloning.
        with CustomObjectScope({'gelu': gelu}):
            annotated = tf.keras.models.clone_model(self.model, clone_function=annotate_dense)

        # quantize_apply turns the annotated graph into a quantization-aware one
        with tfmot.quantization.keras.quantize_scope():
            self.model = tfmot.quantization.keras.quantize_apply(annotated)

    def quantize_aware_model(self):
        '''
        Convert self.model into a quantized TFLite model.
        NOTE(review): after this call self.model holds the serialized TFLite
        flatbuffer returned by convert(), not a Keras model — callers must not
        use Keras APIs on it afterwards.
        '''
        import tensorflow as tf
        converter = tf.lite.TFLiteConverter.from_keras_model(self.model)
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        self.model = converter.convert()

    def save_model(self, model_dir_or_path: str, **kwargs):
        '''
        Save the current model to disk.
        :param model_dir_or_path: a '.h5' file path, or a directory ('model.h5' is appended)
        :param kwargs: supports 'save_format' and 'include_optimizer'
        '''
        if model_dir_or_path.endswith('.h5'):
            model_path = model_dir_or_path
        else:
            model_path = os.path.join(model_dir_or_path, 'model.h5')
        save_format, include_optimizer = kwargs.get('save_format'), kwargs.get('include_optimizer')
        if save_format != 'h5':
            # non-h5 formats (e.g. SavedModel) are written to the given path itself
            model_path = model_dir_or_path
        if not self.do_quantize:
            self.model.save(model_path, save_format=save_format, include_optimizer=include_optimizer)
            return
        logger.warning('quantizing model ...')
        self.quantize_aware_model()
        logger.warning('quantize model complete')
        # quantize_aware_model() replaces self.model with serialized TFLite
        # flatbuffer bytes, which have no .save() method — the old code's
        # self.model.save(...) here raised AttributeError. Write the bytes out.
        with open(model_path, 'wb') as fout:
            fout.write(self.model)

    def get_cell_layer(self, size, name, trainable=True, return_sequences=True,
                       return_state=False, cell_type=None):
        '''
        Build a bidirectional recurrent layer or a 1D conv layer for the student.
        :param size: number of units (filters for 'cnn')
        :param name: layer name; the inner recurrent cell gets a '_c' suffix
        :param trainable: whether the layer's weights are trainable
        :param return_sequences: recurrent layers only
        :param return_state: recurrent layers only
        :param cell_type: one of lstm/rnn/gru/cnn; defaults to config['distill']['cell_type']
        '''
        import tensorflow as tf
        distill_cfg = self.config['distill']
        kind = cell_type if cell_type else distill_cfg['cell_type']
        layer_classes = {
            'lstm': tf.keras.layers.LSTM,
            'rnn': tf.keras.layers.RNN,
            'gru': tf.keras.layers.GRU,
            'cnn': tf.keras.layers.Conv1D
        }
        assert kind in layer_classes, \
            f'invalid cell_type:{kind}, must be one of {layer_classes.keys()}'
        layer_cls = layer_classes[kind]
        if kind == 'cnn':
            # NOTE(review): the conv layer ignores `name` and keeps Keras'
            # auto-generated one; renaming it would break by_name weight loading.
            return layer_cls(size, 5, 3, trainable=trainable, activation=gelu)
        inner_cell = layer_cls(size, return_sequences=return_sequences, return_state=return_state,
                               trainable=trainable, name=name + '_c')
        return tf.keras.layers.Bidirectional(inner_cell, name=name, trainable=trainable)

    def flat_cnn_layer(self, logits, append_dense=True):
        '''
        Convolve the sequence features, max-pool and flatten them, optionally
        followed by the tanh 'pooler' dense layer.
        :param logits: sequence features — assumed (batch, seq, hidden); TODO confirm
        :param append_dense: whether to append the 'pooler' dense layer
        :return: the flattened (and optionally pooled) tensor
        '''
        conv = self.get_cell_layer(self.hidden_size, name='word_cnn', cell_type='cnn')
        out = conv(logits)
        out = tf.keras.layers.MaxPool1D(3, name='max_pool')(out)
        out = tf.keras.layers.Reshape((-1,), name='flat')(out)
        if not append_dense:
            return out
        pooler = tf.keras.layers.Dense(
            self.hidden_size, activation="tanh", name="pooler",
            kernel_initializer=get_initializer(self.model_config.initializer_range)
        )
        return pooler(out)

    def create_best_95_model(self):
        '''
        Build the student graph that (per its name) reached ~95 — presumably
        accuracy; TODO confirm. Structure: embedding -> stacked BiLSTMs ->
        selu dense as the shared base, then dropout, a task BiLSTM, flatten,
        tanh pooler and the classifier head. When do_quantize is set, the
        dense/recurrent layers are annotated and quantize_apply is run on the
        assembled model.
        :return: a tf.keras Model mapping input_ids -> classifier output
        '''
        import tensorflow as tf
        import tensorflow_model_optimization as tfmot
        quantize_annotate_layer = tfmot.quantization.keras.quantize_annotate_layer
        assert 'distill' in self.config and self.config['distill'], 'section of distill in task_config.yml is invalid!'
        distill = self.config['distill']
        hidden_size = self.model_config.hidden_size
        vocab_size = self.tokenizer.vocab_size
        # pretrain uses the full position range; task training uses config max_len
        seq_len = self.config['max_len'] if not self.in_pretrain else self.model_config.max_position_embeddings
        # base layers stay frozen during task training unless fine-tuning is enabled
        train_base = self.in_pretrain or self.fine_tune_base
        input_ids = tf.keras.layers.Input((seq_len,), name='input_ids', dtype='int32')
        word_embs = tf.keras.layers.Embedding(vocab_size, hidden_size,
                                              trainable=train_base, name='embedding')(input_ids)
        lstm_out = word_embs
        # stacked BiLSTM base; -1 in layer_sizes means "use hidden_size"
        for i, size in enumerate(distill['layer_sizes']):
            layer = self.get_cell_layer(hidden_size if size == -1 else size,
                                        name=f'bilstm_{i}', cell_type='lstm',
                                        trainable=train_base,
                                        return_sequences=True)
            if self.do_quantize:
                layer = quantize_annotate_layer(layer)
            lstm_out = layer(lstm_out)
        dense_layer = tf.keras.layers.Dense(hidden_size, activation='selu',
                                            trainable=train_base, name='base_dense')
        if self.do_quantize:
            dense_layer = quantize_annotate_layer(dense_layer)
        # base output kept on self — presumably read by distillation losses elsewhere
        self.logits = dense_layer(lstm_out)
        # task layers
        drop_prob = self.train_args['dropout_prob']
        logits = tf.keras.layers.Dropout(drop_prob, name='base_drop')(self.logits)
        half_hidden_size = int(hidden_size / 2)
        layer = self.get_cell_layer(half_hidden_size, name='bilstm_task', cell_type='lstm', return_sequences=True)
        if self.do_quantize:
            layer = quantize_annotate_layer(layer)
        logits = layer(logits)
        logits = tf.keras.layers.Reshape((-1,), name='flat')(logits)
        pooler = tf.keras.layers.Dense(
            hidden_size, activation="tanh", name="pooler",
            kernel_initializer=get_initializer(self.model_config.initializer_range)
        )
        if self.do_quantize:
            pooler = quantize_annotate_layer(pooler)
        logits = pooler(logits)
        # with a teacher the head emits raw logits (softmax handled in the loss)
        output_activate = 'softmax' if not self.distill_from_teacher else None
        classifier_layer = tf.keras.layers.Dense(
            self.model_config.num_labels,
            kernel_initializer=get_initializer(self.model_config.initializer_range),
            name="classifier",
            activation=output_activate
        )
        if self.do_quantize:
            classifier_layer = quantize_annotate_layer(classifier_layer)
        self.last_layer = classifier_layer.name
        output = classifier_layer(logits)

        model = tf.keras.models.Model(inputs=[input_ids], outputs=[output])
        if self.do_quantize:
            model = tfmot.quantization.keras.quantize_apply(model)
        return model

    def create_best_92_8_model(self):
        '''
        Build the student graph that (per its name) reached ~92.8 — presumably
        accuracy; TODO confirm. Structure: shared base layers, dropout, a task
        BiLSTM with attention, flatten, gelu dense and the classifier head.
        :return: a tf.keras Model mapping input_ids -> classifier output
        '''
        import tensorflow as tf
        import tensorflow_model_optimization as tfmot
        quantize_annotate_layer = tfmot.quantization.keras.quantize_annotate_layer

        input_ids, x = self.create_base_layers()
        x = tf.keras.layers.Dropout(self.train_args['dropout_prob'], name='base_drop')(x)
        x = self.get_cell_layer(self.hidden_size, name='rnn', return_sequences=True)(x)
        x = attention_layer(x, name='task_att')
        x = tf.keras.layers.Flatten()(x)
        dense = tf.keras.layers.Dense(self.hidden_size * 2, activation=gelu, name='dense',
                                      kernel_initializer=get_initializer(self.model_config.initializer_range))
        if self.do_quantize:
            dense = quantize_annotate_layer(dense)
        x = dense(x)

        # with a teacher the head emits raw logits (softmax handled in the loss)
        output_activate = 'softmax' if not self.distill_from_teacher else None
        classifier = tf.keras.layers.Dense(
            self.model_config.num_labels,
            kernel_initializer=get_initializer(self.model_config.initializer_range),
            name="classifier",
            activation=output_activate
        )
        if self.do_quantize:
            classifier = quantize_annotate_layer(classifier)
        self.last_layer = classifier.name
        output = classifier(x)
        model = tf.keras.models.Model(inputs=[input_ids], outputs=[output], name=self.model_type)
        if self.do_quantize:
            model = tfmot.quantization.keras.quantize_apply(model)
        return model

    def model_for_sentiment_analysis(self, logits):
        '''
        Task head for sentiment analysis: flatten + dropout, a gelu dense
        layer, then the classifier. Sets self.last_layer to the classifier's name.
        :param logits: base-layer output tensor
        :return: the classifier output tensor
        '''
        import tensorflow as tf
        x = tf.keras.layers.Flatten()(logits)
        x = tf.keras.layers.Dropout(self.train_args['dropout_prob'], name='base_drop')(x)
        x = tf.keras.layers.Dense(self.hidden_size * 2, activation=gelu, name='dense',
                                  kernel_initializer=get_initializer(self.model_config.initializer_range))(x)

        # with a teacher the head emits raw logits (softmax handled in the loss)
        output_activate = 'softmax' if not self.distill_from_teacher else None
        classifier = tf.keras.layers.Dense(
            self.model_config.num_labels,
            kernel_initializer=get_initializer(self.model_config.initializer_range),
            name="classifier",
            activation=output_activate
        )
        self.last_layer = classifier.name
        return classifier(x)

    def create_base_layers(self):
        '''
        Build the shared base of the student: embedding, stacked bidirectional
        layers and a selu dense. Stores the dense output in self.logits and
        the sequence length in self.seq_len.
        :return: (input_ids placeholder, base output tensor)
        '''
        assert 'distill' in self.config and self.config['distill'], 'section of distill in task_config.yml is invalid!'
        distill_cfg = self.config['distill']
        hidden = self.hidden_size
        # pretrain uses the full position range; task training uses config max_len
        self.seq_len = self.model_config.max_position_embeddings if self.in_pretrain else self.config['max_len']
        # base weights stay frozen during task training unless fine-tuning is enabled
        train_base = self.in_pretrain or self.fine_tune_base
        input_ids = tf.keras.layers.Input((self.seq_len,), name='input_ids', dtype='int32')
        out = tf.keras.layers.Embedding(self.tokenizer.vocab_size, hidden,
                                        trainable=train_base, name='embedding')(input_ids)
        # stacked bidirectional base; -1 in layer_sizes means "use hidden_size"
        for idx, layer_size in enumerate(distill_cfg['layer_sizes']):
            out = self.get_cell_layer(hidden if layer_size == -1 else layer_size,
                                      name=f'base_{idx}', trainable=train_base,
                                      return_sequences=True)(out)
        self.logits = tf.keras.layers.Dense(hidden, activation='selu',
                                            trainable=train_base, name='base_dense')(out)
        return input_ids, self.logits

    def create_pooled_classifier_distill_model(self):
        '''
        Create a model that is distilled directly from the teacher's
        pooled_output and classifier. When train_dense is set the model
        outputs the dense features (the classifier is skipped); otherwise it
        outputs the classifier's logits.
        :return: a tf.keras Model
        '''
        hidden = self.hidden_size
        # pretrain uses the full position range; task training uses config max_len
        self.seq_len = self.model_config.max_position_embeddings if self.in_pretrain else self.config['max_len']
        input_ids = tf.keras.layers.Input((self.seq_len,), name='input_ids', dtype='int32')
        out = tf.keras.layers.Embedding(self.tokenizer.vocab_size, hidden,
                                        trainable=self.train_dense, name='embedding')(input_ids)
        out = self.get_cell_layer(hidden, trainable=self.train_dense, name='cnn', cell_type='cnn')(out)
        out = tf.keras.layers.Flatten()(out)
        out = tf.keras.layers.Dense(self.model_config.hidden_size, trainable=self.train_dense,
                                    activation='tanh', name='base_dense')(out)
        classifier = tf.keras.layers.Dense(
            self.model_config.num_labels, name="classifier",
            kernel_initializer=get_initializer(self.model_config.initializer_range)
        )
        if not self.train_dense:
            out = classifier(out)
        return tf.keras.models.Model(inputs=[input_ids], outputs=[out], name=self.model_type)

    def create_student_model(self):
        '''
        Build the full student model: shared base layers plus either the raw
        base output (pretrain mode) or the task head found by name as
        'model_for_<task_type>' (dashes replaced with underscores).
        :return: a tf.keras Model
        :raises NotImplementedError: when the task head method is missing
        '''
        input_ids, _ = self.create_base_layers()
        if self.in_pretrain:
            # pretrain distills the base dense output directly
            self.last_layer = 'base_dense'
            output = self.logits
        else:
            task_layer_name = 'model_for_' + self.config['task_type'].replace('-', '_')
            if not hasattr(self, task_layer_name):
                # NotImplementedError is an Exception subclass, so existing
                # callers catching Exception keep working
                raise NotImplementedError(f'please implement member function:{task_layer_name}')
            output = getattr(self, task_layer_name)(self.logits)
            assert self.last_layer is not None, 'last_layer should be set in task_layer_name!'
        model = tf.keras.models.Model(inputs=[input_ids], outputs=[output], name=self.model_type)
        return model

    def wrap_one_hot_out(self, data_iter):
        '''
        Wrap an (x, y) batch iterator so integer labels come out one-hot encoded.
        NOTE(review): the one-hot width is y.max() + 1 per batch, so a batch
        missing the highest label id yields a narrower array — confirm intended.
        :param data_iter: iterator of (inputs, integer-label-array) batches
        '''
        for x, y in data_iter:
            # row i of eye(n) is the one-hot vector for class i
            yield x, np.eye(y.max() + 1)[y]

    def build_teach_data(self, data_iter, teacher_model):
        '''
        Run the teacher over every batch and yield distillation samples shaped
        by the task-specific teacher-output handler.
        :param data_iter: iterator of (inputs,) or (inputs, labels) batches
        :param teacher_model: callable teacher producing the distillation targets
        '''
        # pick the handler once, before iterating
        if self.in_pretrain:
            handler = get_build_origin_teach_data(self.hidden_size)
        elif self.config['task_type'] == 'sentiment-analysis':
            handler = get_build_sentiment_analysis_teach_data(self.train_dense)
        elif self.config['task_type'] == 'ner':
            handler = build_ner_teach_data
        else:
            raise Exception(f'no teacher output handler for task:{self.config["task_type"]}')
        for batch in data_iter:
            teacher_outs = teacher_model(batch[0])
            labels = batch[1] if len(batch) > 1 else None
            yield handler(batch[0], labels, teacher_outs)
