#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2020/3/30 16:47
@Author : SPZ
@File : densent_seg.py
@Software: pycharm
"""

import keras.backend as K
from train_lib.lib.module.dense_module import _dense_block, _transition_block
from keras import Input, Model
from keras.layers import Conv2D, BatchNormalization, Activation, Permute, TimeDistributed, Flatten, Dense, Lambda
from keras.optimizers import Adam
from keras.regularizers import l2
from keras.utils.multi_gpu_utils import multi_gpu_model

from train_lib.lib.data_loader import DataLoader


def _ctc_loss(args):
    """Lambda-layer body computing the per-sample CTC loss.

    ``args`` is the 4-tuple fed by the ``ctc`` Lambda layer:
    (labels, y_pred, input_length, label_length).
    Returns the (batch, 1) CTC cost tensor.
    """
    y_true, y_pred, pred_lengths, label_lengths = args
    return K.ctc_batch_cost(y_true, y_pred, pred_lengths, label_lengths)


class DenseNetOCR:
    """DenseNet-based text-line recognizer trained with CTC.

    Two models are kept:
      * ``model`` — inference graph: image -> per-timestep class softmax.
      * ``parallel_model`` — compiled training graph with the extra CTC
        inputs, optionally replicated when ``num_gpu > 1``.
    """

    def __init__(self,
                 num_classes,
                 lr=0.0005,
                 image_height=32,
                 image_channels=1,
                 maxlen=10,
                 dropout_rate=0.2,
                 weight_decay=1e-4,
                 filters=64,
                 num_gpu=1,
                 ):
        # Width stays None so variable-width line images can be fed in.
        self.image_shape = (image_height, None, image_channels)
        self.num_classes = num_classes
        self.lr = lr
        self.maxlen = maxlen
        self.dropout_rate = dropout_rate
        self.weight_decay = weight_decay
        self.filters = filters
        self.num_gpu = num_gpu
        # Build the inference model first; the training model reuses its layers.
        self.model = self.__rec_model()
        self.parallel_model = self.train_model()

    def __base_model(self, input_tensor):
        """DenseNet backbone: image tensor -> 2D feature map (relu output)."""
        decay = self.weight_decay
        n_filters = self.filters

        feat = Conv2D(n_filters, (5, 5), strides=(2, 2), kernel_initializer='he_normal',
                      padding='same', use_bias=False,
                      kernel_regularizer=l2(decay))(input_tensor)

        # Dense block 1: 64 + 8*8 = 128 channels, then transition keeps 128.
        feat, n_filters = _dense_block(feat, 8, n_filters, 8, None, decay)
        feat, n_filters = _transition_block(feat, 128, self.dropout_rate, 2, decay)

        # Dense block 2: 128 -> 192, transition compresses back to 128.
        feat, n_filters = _dense_block(feat, 8, n_filters, 8, None, decay)
        feat, n_filters = _transition_block(feat, 128, self.dropout_rate, 2, decay)

        # Dense block 3: 128 -> 192, no transition afterwards.
        feat, n_filters = _dense_block(feat, 8, n_filters, 8, None, decay)

        feat = BatchNormalization(axis=-1, epsilon=1.1e-5)(feat)
        return Activation('relu')(feat)

    def __rec(self, feat):
        """Recognition head: feature map -> per-timestep class probabilities."""
        # Swap height/width so the width axis becomes the time dimension.
        seq = Permute((2, 1, 3), name='permute')(feat)
        seq = TimeDistributed(Flatten(), name='flatten')(seq)
        return Dense(self.num_classes, name='out', activation='softmax')(seq)

    def __rec_model(self):
        """Assemble the inference model: image in, softmax sequence out."""
        image = Input(shape=self.image_shape, name="the_input")
        return Model(inputs=image, outputs=self.__rec(feat=self.__base_model(image)))

    def train_model(self):
        """Wrap the rec model with CTC loss inputs and compile it.

        Returns the compiled (and, when ``num_gpu > 1``, multi-GPU) model.
        """
        labels = Input(shape=(self.maxlen,), dtype='float32', name="the_labels")
        input_length = Input(shape=(1,), name="input_length", dtype='int64')
        label_length = Input(shape=(1,), name="label_length", dtype='int64')
        y_pred = self.model.get_layer('out').output

        ctc_out = Lambda(_ctc_loss, output_shape=(1,),
                         name='ctc')([labels, y_pred, input_length, label_length])
        ctc_model = Model(inputs=[self.model.input, labels, input_length, label_length],
                          outputs=ctc_out)

        parallel = ctc_model if self.num_gpu <= 1 else multi_gpu_model(ctc_model, gpus=self.num_gpu)
        # The Lambda output already IS the loss, so the compiled loss just
        # passes the prediction through unchanged.
        parallel.compile(loss={'ctc': lambda y_true, y_pred: y_pred},
                         optimizer=Adam(self.lr), metrics=['accuracy'])
        return parallel

    def train(self, epochs, train_data_loader: DataLoader, valid_data_loader: DataLoader, **kwargs):
        """Fit on generator-backed loaders; extra kwargs go to fit_generator."""
        self.parallel_model.fit_generator(
            generator=train_data_loader.load_data(),
            steps_per_epoch=train_data_loader.steps_per_epoch,
            validation_data=valid_data_loader.load_data(),
            validation_steps=valid_data_loader.steps_per_epoch,
            epochs=epochs,
            **kwargs)
