import numpy as np
np.random.seed(3)  # fix the initial random seed for reproducibility

import keras
from keras.layers import (
    Input, Conv2D, MaxPool2D, LeakyReLU, # BatchNormalization,
    LocallyConnected2D, Flatten, Dense, Dropout, Reshape, ZeroPadding2D,
    UpSampling2D, Conv2DTranspose, Concatenate)
from keras.models import Model
import keras.backend as K

from PIL import Image

from tqdm import tqdm

import os, pickle
from glob import glob
import datetime
import time
from urllib import request, parse
from urllib.error import HTTPError, URLError


class Template:
    """Base training harness: builds or loads a Keras model, trains it from a
    generator (optionally until a wall-clock deadline), then evaluates it.

    Subclasses override nnet / findPaths / generatorDataset / saveModel /
    dumpDescription / test to define the actual model and data pipeline.
    """

    def __init__(self, model_filename, input_shape,
                 optimizer, loss, metrics,
                 clean=False, train=True,
                 batch_size=128, epochs_num=60):
        # Path the trained model is loaded from / saved to.
        self.model_filename = model_filename
        self.input_shape = input_shape

        # Forwarded verbatim to model.compile().
        self.optimizer = optimizer
        self.loss = loss
        self.metrics = metrics

        # clean=True forces a rebuild even if a saved model file exists.
        self.USE_CLEAN = clean
        self.USE_TRAIN = train

        self.batch_size = batch_size
        self.epochs_num = epochs_num

        # Number of epochs actually completed by the last run().
        self.actual_epoch = 0

    def run(self, test_max_steps=10, time=None, remote_monitor=None, date=None):
        """Full pipeline: build/load -> (optionally train) -> evaluate.

        time  -- optional 'HH:MM' deadline; switches to timingTrain().
                 (NOTE: this parameter shadows the stdlib `time` module inside
                 this method; the name is kept for backward compatibility with
                 existing run(time=...) callers.)
        date  -- optional 'YYYY-MM-DD' date for the deadline.
        Returns (model_filename, actual_epoch).
        """
        if not self.USE_CLEAN and os.path.isfile(self.model_filename):
            self.loadModel()
        else:
            self.buildModel()

        generatorArgs, steps = self.findPaths()
        generator = self.generatorDataset(generatorArgs)

        if self.USE_TRAIN:
            if time is None:
                self.train(generator, steps)
            else:
                self.timingTrain(time, generator, steps, remote_monitor, date)

        self.test(generator, min(steps, test_max_steps))

        return self.model_filename, self.actual_epoch

    # --- Hooks for subclasses -------------------------------------------

    def nnet(self): return None

    def findPaths(self): return (None, None)

    def generatorDataset(self, args): yield (None, None)

    def buildModel(self):
        """Create a fresh model from nnet() and compile it."""
        self.model = self.nnet()
        self.model.compile(
                optimizer=self.optimizer, loss=self.loss, metrics=self.metrics)

    def loadModel(self):
        self.model = keras.models.load_model(self.model_filename)

    def saveModel(self): pass

    def train(self, generator, steps_per_epoch):
        """Plain fixed-epoch-count training via fit_generator."""
        self.model.fit_generator(
                generator, steps_per_epoch=steps_per_epoch,
                epochs=self.epochs_num, workers=3)
        self.saveModel()
        self.actual_epoch = self.epochs_num

    def dumpDescription(self, step, steps_per_epoch, args): return None

    def remoteMonitor(self, url, step, steps_per_epoch, start_time, args):
        """POST current training progress to a monitoring endpoint.

        Best effort: any network or server failure is silently ignored so
        monitoring can never interrupt a training run.
        """
        epoch = (step+1) // steps_per_epoch
        loss, metric = args
        count_steps = steps_per_epoch * self.epochs_num
        # Estimate the finishing wall-clock time from the mean step duration.
        delta_time = (time.time() - start_time) / (step + 1)
        est_time = (count_steps - step - 1) * delta_time
        est_time = datetime.datetime.today() + datetime.timedelta(seconds=est_time)
        est_time = est_time.strftime('%H:%M:%S')

        data = {'model_filename': self.model_filename, 'epochs_num': self.epochs_num,
                'epoch': epoch, 'est_time': est_time, 'loss': loss, 'metric': metric}

        try:
            req = request.Request(url, data=parse.urlencode(data).encode())
            # Use the response as a context manager so the connection is
            # closed (the previous code leaked the response object).
            with request.urlopen(req, timeout=3):
                pass
        except (HTTPError, URLError):
            pass
        except Exception:
            pass

    def timingTrain(self, time_str, generator, steps_per_epoch, remote_monitor=None, date_str=None):
        """Train step-by-step until epochs_num epochs complete OR a deadline.

        time_str -- 'HH:MM' stop time; when date_str is None and that time is
                    more than an hour in the past, the deadline rolls over to
                    tomorrow.
        date_str -- optional 'YYYY-MM-DD' explicit stop date.
        """
        today = datetime.datetime.today()
        dst_hour, dst_minute = [int(num_str) for num_str in time_str.split(':')]

        if date_str is not None:
            dst_year, dst_month, dst_day = [int(num_str) for num_str in date_str.split('-')]
            timer = datetime.datetime(dst_year, dst_month, dst_day, dst_hour, dst_minute, 0)
        else:
            timer = datetime.datetime(today.year, today.month, today.day, dst_hour, dst_minute, 0)
            if timer <= (datetime.datetime.today() - datetime.timedelta(hours=1)):
                timer = timer + datetime.timedelta(days=1)

        count_steps = steps_per_epoch * self.epochs_num
        pbar = tqdm(range(count_steps), ncols=100)
        start_time = time.time()
        step = -1  # guard: if count_steps == 0 the loop body never runs
        for step in pbar:
            batch_X, batch_Y = next(generator)
            outputs = self.model.train_on_batch(batch_X, batch_Y)

            pbar_description = self.dumpDescription(step, steps_per_epoch, outputs)
            pbar.set_description(pbar_description)

            # Report every 10th step and on the very last step.
            if remote_monitor is not None and ((step + 1) >= count_steps or step % 10 == 0):
                self.remoteMonitor(
                        remote_monitor, step, steps_per_epoch, start_time, outputs)

            if datetime.datetime.today() >= timer:
                print()
                break
        pbar.close()  # release the bar even after an early deadline break

        self.saveModel()
        self.actual_epoch = (step+1) // steps_per_epoch

    def test(self, generator, steps): return None

class Classifier(Template):
    """Image classifier trained from a directory-per-label dataset.

    Class labels are the sub-directory names of datasets_path; the generator
    yields inverted, [0, 1]-normalized images with a random translation
    augmentation applied per batch.
    """

    def __init__(self, datasets_path, model_filename, input_shape=(128, 128, 3),
                 clean=False, train=True,
                 batch_size=128, epochs_num=30):
        # One class per sub-directory of the dataset root.
        self.datasets_path = datasets_path
        self.labels = os.listdir(datasets_path)
        self.classes_num = len(self.labels)

        super().__init__(
                model_filename, input_shape,
                'Adam', 'categorical_crossentropy', ['accuracy'],
                clean, train, batch_size, epochs_num)

    def nnet(self): return None

    def findPaths(self):
        """Collect (path, label-name) pairs and the steps per epoch."""
        data_paths, data_labels = [], []
        for label in self.labels:
            paths = glob(os.path.join(self.datasets_path, label, '*'))
            data_paths.extend(paths)
            data_labels.extend([label] * len(paths))

        # Shrink the batch if the dataset is smaller than one batch.
        self.batch_size = min(self.batch_size, len(data_paths))
        steps = len(data_paths) // self.batch_size

        return [data_paths, data_labels], steps

    @staticmethod
    def formatImage(image, input_shape):
        """Letterbox `image` onto a white uint8 canvas of `input_shape`.

        The image is scaled so its longest side fits the canvas, preserving
        aspect ratio, and pasted centered.
        NOTE(review): assumes input_shape has 3 channels — a 1-channel
        input_shape would not broadcast against the RGB paste; confirm
        callers never pass a single-channel shape here.
        """
        src_size = np.array(image.size)
        dst_size = np.array([input_shape[0], input_shape[1]])

        if image.mode != 'RGB': image = image.convert('RGB')
        trans_array = np.zeros(input_shape, dtype='uint8') + 255  # white canvas

        k = dst_size / src_size.max()
        scale_size = np.around((k * src_size)).astype('int')
        trans_vect = ((dst_size - scale_size) / 2).astype('int')  # centering offset

        scale_image = image.resize(tuple(scale_size), resample=Image.BOX)
        scale_array = np.array(scale_image, dtype='uint8')
        # Reuse scale_array instead of converting the image a second time
        # (the original recomputed np.array(scale_image) here).
        trans_array[trans_vect[1]: trans_vect[1]+scale_size[1],
                    trans_vect[0]: trans_vect[0]+scale_size[0]] = scale_array

        return trans_array

    def generatorDataset(self, args):
        """Yield endless (batch_X, batch_Y) with random-shift augmentation."""
        data_paths, data_labels = args
        width_shift_range, height_shift_range = 10, 10

        color_mode = 'RGB'
        if self.input_shape[-1] == 1: color_mode = 'L'
        height = self.input_shape[0]
        width = self.input_shape[1]

        while True:
            shuffle_index = np.random.permutation(len(data_paths))
            batch_count = shuffle_index.size // self.batch_size
            for batch_time in range(batch_count):
                batch_index = shuffle_index[batch_time * self.batch_size:
                                            (batch_time + 1) * self.batch_size]

                batch_datas, batch_labels = [], []
                for i in batch_index:
                    img_data = Image.open(data_paths[i]).convert(color_mode)
                    img_label = self.labels.index(data_labels[i])
                    batch_datas.append(self.formatImage(img_data, self.input_shape))
                    batch_labels.append(img_label)

                # Invert and normalize to [0, 1]: dark ink becomes high values.
                batch_X = 1 - np.array(batch_datas, dtype='float32') / 255
                batch_X = batch_X.reshape((-1,) + self.input_shape)
                batch_Y = keras.utils.to_categorical(batch_labels, num_classes=self.classes_num)

                # Shift the whole batch by a random (dx, dy); uncovered border
                # pixels keep their previous values. (NOTE: randint's upper
                # bound is exclusive, so the shift is in [-range, range-1].)
                width_shift = np.random.randint(-width_shift_range, width_shift_range)
                height_shift = np.random.randint(-height_shift_range, height_shift_range)
                if width_shift >= 0:
                    if height_shift >= 0:
                        batch_X[:, height_shift:, width_shift:, :] =\
                        batch_X[:, :height-height_shift, :width-width_shift, :]
                    else:
                        batch_X[:, :height+height_shift, width_shift:, :] =\
                        batch_X[:, -height_shift:, :width-width_shift, :]
                else:
                    if height_shift >= 0:
                        batch_X[:, height_shift:, :width+width_shift, :] =\
                        batch_X[:, :height-height_shift, -width_shift:, :]
                    else:
                        batch_X[:, :height+height_shift, :width+width_shift, :] =\
                        batch_X[:, -height_shift:, -width_shift:, :]

                yield batch_X, batch_Y

    def saveModel(self):
        """Save the model plus the label list (pickled next to it)."""
        self.model.save(self.model_filename)
        with open(os.path.splitext(self.model_filename)[0] + '.pkl', 'wb') as f:
            pickle.dump(self.labels, f)

    def dumpDescription(self, step, steps_per_epoch, args):
        """Progress-bar text: current epoch plus the latest loss/accuracy."""
        description_frame = 'Epoch: {}, Loss = {:0,.4f}, Accuracy = {:0,.4f}'
        epoch = (step+1) // steps_per_epoch
        loss, accuracy = args
        return description_frame.format(epoch, loss, accuracy)

    def test(self, generator, steps):
        """Evaluate on `steps` generator batches; returns [loss, accuracy]."""
        evaluate = self.model.evaluate_generator(generator, steps)
        print('Evaluate Loss = {:0,.4f}, Accuracy = {:0,.4f}'.format(evaluate[0], evaluate[1]))
        return evaluate

class DocumentClassifier(Classifier):
    """Document classifier: multi-scale CNN whose three branch projections
    all use 256-wide dense layers."""

    def nnet(self):
        """Build the network: five conv/pool stages; the three deepest
        feature maps are flattened, projected to 256 units each,
        concatenated, and classified through a dropout-regularized head."""
        net_in = Input(shape=self.input_shape, name='InputX')

        def conv_stage(tensor, filters):
            # 3x3 same-padded ReLU convolution followed by 2x2 max pooling.
            convolved = Conv2D(filters, (3, 3), activation='relu', padding='same')(tensor)
            return MaxPool2D((2, 2))(convolved)

        taps, tensor = [], net_in
        for stage, filters in enumerate((1, 4, 8, 16, 32), start=1):
            tensor = conv_stage(tensor, filters)
            if stage >= 3:  # keep the three deepest stages as branch inputs
                taps.append(tensor)

        flattened = [Flatten()(tap) for tap in taps]
        projected = [Dense(256, activation='relu')(flat) for flat in flattened]

        merged = Concatenate()(projected)
        head = Dense(256, activation='relu')(merged)
        regularized = Dropout(0.15)(head)
        net_out = Dense(self.classes_num, activation='softmax', name='OutputY')(regularized)

        return Model(inputs=net_in, outputs=net_out)

class LayoutClassifier(Classifier):
    """Layout classifier: multi-scale CNN whose branch projections shrink
    with depth (256 / 128 / 64 units)."""

    def nnet(self):
        """Build the network: five conv/pool stages; the three deepest
        feature maps are flattened, projected (256, 128, 64 units),
        concatenated, and classified through a dropout-regularized head."""
        net_in = Input(shape=self.input_shape, name='InputX')

        def conv_stage(tensor, filters):
            # 3x3 same-padded ReLU convolution followed by 2x2 max pooling.
            convolved = Conv2D(filters, (3, 3), activation='relu', padding='same')(tensor)
            return MaxPool2D((2, 2))(convolved)

        taps, tensor = [], net_in
        for stage, filters in enumerate((1, 4, 8, 16, 32), start=1):
            tensor = conv_stage(tensor, filters)
            if stage >= 3:  # keep the three deepest stages as branch inputs
                taps.append(tensor)

        flattened = [Flatten()(tap) for tap in taps]
        projected = [Dense(units, activation='relu')(flat)
                     for units, flat in zip((256, 128, 64), flattened)]

        merged = Concatenate()(projected)
        head = Dense(128, activation='relu')(merged)
        regularized = Dropout(0.15)(head)
        net_out = Dense(self.classes_num, activation='softmax', name='OutputY')(regularized)

        return Model(inputs=net_in, outputs=net_out)

class TypeClassifier(Classifier):
    """Type classifier: multi-scale CNN whose branch projections grow with
    depth (64 / 128 / 256 units)."""

    def nnet(self):
        """Build the network: five conv/pool stages; the three deepest
        feature maps are flattened, projected (64, 128, 256 units),
        concatenated, and classified through a dropout-regularized head."""
        net_in = Input(shape=self.input_shape, name='InputX')

        def conv_stage(tensor, filters):
            # 3x3 same-padded ReLU convolution followed by 2x2 max pooling.
            convolved = Conv2D(filters, (3, 3), activation='relu', padding='same')(tensor)
            return MaxPool2D((2, 2))(convolved)

        taps, tensor = [], net_in
        for stage, filters in enumerate((1, 4, 8, 16, 32), start=1):
            tensor = conv_stage(tensor, filters)
            if stage >= 3:  # keep the three deepest stages as branch inputs
                taps.append(tensor)

        flattened = [Flatten()(tap) for tap in taps]
        projected = [Dense(units, activation='relu')(flat)
                     for units, flat in zip((64, 128, 256), flattened)]

        merged = Concatenate()(projected)
        head = Dense(128, activation='relu')(merged)
        regularized = Dropout(0.15)(head)
        net_out = Dense(self.classes_num, activation='softmax', name='OutputY')(regularized)

        return Model(inputs=net_in, outputs=net_out)

class AreaDetect(Template):
    """Pixel-wise area detector: predicts one mask channel per marker color.

    Label images have the regions of interest painted in the given colors;
    getTarget() converts them into binary per-color masks that the network
    learns to reproduce.
    """

    def __init__(self, datas_dir, labels_dir, colors, model_filename,
                 input_shape=(128, 128, 3), clean=False, train=True,
                 batch_size=50, epochs_num=300):
        self.datas_dir = datas_dir
        self.labels_dir = labels_dir
        # One output channel per marker color (rows of an (N, 3) array).
        self.colors = np.array(colors).reshape(-1, 3)
        self.classes_num = len(self.colors)

        super().__init__(
                model_filename, input_shape,
                'Adam', self.NMSE, ['mae'],
                clean, train, batch_size, epochs_num)

    def nnet(self):
        """Encoder-decoder with a dense bottleneck, a locally-connected
        branch, and a skip connection from the first pooling stage."""
        input_X = Input(shape=self.input_shape, name='InputX')

        # Encoder: four conv/pool stages (128 -> 8 spatial resolution).
        conv1 = Conv2D(1, (1, 1), padding='same')(input_X)
        pool1 = MaxPool2D((2, 2))(conv1)

        conv2 = LeakyReLU(0.3)(Conv2D(3, (3, 3), padding='same')(pool1))
        pool2 = MaxPool2D((2, 2))(conv2)

        conv3 = LeakyReLU(0.3)(Conv2D(5, (3, 3), padding='same')(pool2))
        pool3 = MaxPool2D((2, 2))(conv3)

        conv4 = LeakyReLU(0.3)(Conv2D(8, (3, 3), padding='same')(pool3))
        pool4 = MaxPool2D((2, 2))(conv4)

        # Dense bottleneck branch, reshaped back to an 8x8x8 feature map.
        flattened = Flatten()(pool4)
        dense1 = Dense(256, activation='relu')(flattened)
        droped = Dropout(0.5)(dense1)
        dense2 = Dense(8 * 8**2, activation='relu')(droped)

        # Locally-connected branch (zero-padded back to 8x8 after each step).
        lcon1 = LocallyConnected2D(8, (3, 3), activation='relu')(pool4)
        padd1 = ZeroPadding2D()(lcon1)
        lcon2 = LocallyConnected2D(8, (3, 3), activation='relu')(padd1)
        padd2 = ZeroPadding2D()(lcon2)

        reshaped = Reshape((8, 8, 8))(dense2)
        concatenated1 = Concatenate()([reshaped, padd2])

        # Decoder: transpose convolutions upsample back to input resolution.
        decv1 = Conv2DTranspose(8, (3, 3), strides=2, padding='same', activation='relu')(concatenated1)
        decv2 = Conv2DTranspose(5, (3, 3), strides=2, padding='same', activation='relu')(decv1)
        decv3 = Conv2DTranspose(3, (3, 3), strides=2, padding='same', activation='relu')(decv2)

        # Skip connection from the first pooling stage.
        concatenated2 = Concatenate()([pool1, decv3])
        decv4 = Conv2DTranspose(self.classes_num * 2, (3, 3), strides=2, padding='same', activation='relu')(concatenated2)

        output_Y = Conv2DTranspose(self.classes_num, (3, 3), padding='same', activation='relu', name='OutputY')(decv4)

        return Model(inputs=input_X, outputs=output_Y)

    def findPaths(self):
        """Pair each data image with the same-named file in labels_dir."""
        data_paths = glob(os.path.join(self.datas_dir, '*'))
        label_paths = [os.path.join(self.labels_dir, os.path.basename(path))
                       for path in data_paths]

        # Shrink the batch if the dataset is smaller than one batch.
        self.batch_size = min(self.batch_size, len(data_paths))
        steps = len(data_paths) // self.batch_size

        return [data_paths, label_paths], steps

    @staticmethod
    def formatImage(image, input_shape):
        """Letterbox `image` onto a white uint8 canvas of `input_shape`.

        The image is scaled so its longest side fits the canvas, preserving
        aspect ratio, and pasted centered.
        NOTE(review): assumes input_shape has 3 channels — a 1-channel
        input_shape would not broadcast against the RGB paste.
        """
        src_size = np.array(image.size)
        dst_size = np.array([input_shape[0], input_shape[1]])

        if image.mode != 'RGB': image = image.convert('RGB')
        trans_array = np.zeros(input_shape, dtype='uint8') + 255  # white canvas

        k = dst_size / src_size.max()
        scale_size = np.around((k * src_size)).astype('int')
        trans_vect = ((dst_size - scale_size) / 2).astype('int')  # centering offset

        scale_image = image.resize(tuple(scale_size), resample=Image.BOX)
        scale_array = np.array(scale_image, dtype='uint8')
        # Reuse scale_array instead of converting the image a second time.
        trans_array[trans_vect[1]: trans_vect[1]+scale_size[1],
                    trans_vect[0]: trans_vect[0]+scale_size[0]] = scale_array

        return trans_array

    def getTarget(self, image, limit=100):
        """Build a (H, W, classes_num) uint8 mask from a painted label image.

        A pixel belongs to a class when its Euclidean RGB distance to that
        class's marker color is below `limit`; mask values are 0 or 255.
        """
        img_arr = self.formatImage(image, self.input_shape)
        filterImage = lambda color: np.sum((img_arr - color)**2, axis=2)**0.5 < limit
        mask = np.stack([filterImage(color) for color in self.colors], axis=2)
        mask = mask.astype('uint8') * 255
        return mask

    def generatorDataset(self, args):
        """Yield endless (image, mask) batches with the same random shift
        applied to both, so inputs and targets stay aligned."""
        data_paths, label_paths = args
        width_shift_range, height_shift_range = 10, 10
        color_mode = 'RGB'
        if self.input_shape[-1] == 1: color_mode = 'L'
        height = self.input_shape[0]
        width = self.input_shape[1]

        while True:
            shuffle_index = np.random.permutation(len(data_paths))
            batch_count = shuffle_index.size // self.batch_size
            for batch_time in range(batch_count):
                batch_index = shuffle_index[batch_time * self.batch_size:
                                            (batch_time + 1) * self.batch_size]

                batch_datas, batch_labels = [], []
                for i in batch_index:
                    img_data = Image.open(data_paths[i])
                    img_label = Image.open(label_paths[i])
                    batch_datas.append(self.formatImage(img_data, self.input_shape))
                    batch_labels.append(self.getTarget(img_label))

                # Inputs: inverted, [0, 1]-normalized. Targets: [0, 1] masks.
                batch_X = 1 - np.array(batch_datas, dtype='float32') / 255
                batch_X = batch_X.reshape((-1,) + self.input_shape)
                batch_Y = np.array(batch_labels, dtype='float32') / 255
                batch_Y = batch_Y.reshape((-1, self.input_shape[0], self.input_shape[1], self.classes_num))

                # Identical random shift for X and Y keeps masks aligned.
                # (NOTE: randint's upper bound is exclusive.)
                width_shift = np.random.randint(-width_shift_range, width_shift_range)
                height_shift = np.random.randint(-height_shift_range, height_shift_range)
                if width_shift >= 0:
                    if height_shift >= 0:
                        batch_X[:, height_shift:, width_shift:, :] =\
                        batch_X[:, :height-height_shift, :width-width_shift, :]
                        batch_Y[:, height_shift:, width_shift:, :] =\
                        batch_Y[:, :height-height_shift, :width-width_shift, :]
                    else:
                        batch_X[:, :height+height_shift, width_shift:, :] =\
                        batch_X[:, -height_shift:, :width-width_shift, :]
                        batch_Y[:, :height+height_shift, width_shift:, :] =\
                        batch_Y[:, -height_shift:, :width-width_shift, :]
                else:
                    if height_shift >= 0:
                        batch_X[:, height_shift:, :width+width_shift, :] =\
                        batch_X[:, :height-height_shift, -width_shift:, :]
                        batch_Y[:, height_shift:, :width+width_shift, :] =\
                        batch_Y[:, :height-height_shift, -width_shift:, :]
                    else:
                        batch_X[:, :height+height_shift, :width+width_shift, :] =\
                        batch_X[:, -height_shift:, -width_shift:, :]
                        batch_Y[:, :height+height_shift, :width+width_shift, :] =\
                        batch_Y[:, -height_shift:, -width_shift:, :]

                yield batch_X, batch_Y

    @staticmethod
    def NMSE(y_true, y_pred):
        """Normalized MSE loss: per-channel errors are rescaled by the
        channel's batch mean so sparse mask channels are not drowned out."""
        mean = K.mean(y_true, axis=(0, 1, 2)) + K.epsilon()
        k = K.reshape(0.5 / mean, shape=(1, -1))
        r = 1 / k
        return K.mean(r * K.square(k * (y_true - y_pred)))

    def loadModel(self):
        # The custom loss must be registered when deserializing the model.
        self.model = keras.models.load_model(
                self.model_filename, custom_objects={'NMSE':self.NMSE})

    def saveModel(self):
        self.model.save(self.model_filename)

    def dumpDescription(self, step, steps_per_epoch, args):
        """Progress-bar text: current epoch plus the latest NMSE/MAE."""
        description_frame = 'Epoch: {}, NMSE = {:0,.4f}, MAE = {:0,.4f}'
        epoch = (step+1) // steps_per_epoch
        loss, accuracy = args
        return description_frame.format(epoch, loss, accuracy)

    def test(self, generator, steps):
        """Evaluate on `steps` generator batches; returns [NMSE, MAE]."""
        evaluate = self.model.evaluate_generator(generator, steps)
        print('Evaluate NMSE = {:0,.4f}, MAE = {:0,.4f}'.format(evaluate[0], evaluate[1]))
        return evaluate

class TextDeskew(Template):
    """Text-orientation detector: classifies an image into one of four 90-
    degree rotations. The label is the number of counter-clockwise quarter-
    turns applied to the sample (generated on the fly, no label files)."""

    def __init__(self, datasets_path, model_filename, input_shape=(128, 128, 1),
                 clean=False, train=True,
                 batch_size=50, epochs_num=200):
        self.datasets_path = datasets_path
        # Four classes: 0, 90, 180 and 270 degree rotations.
        self.classes_num = 4

        super().__init__(
                model_filename, input_shape,
                'Adadelta', 'categorical_crossentropy', ['accuracy'],
                clean, train, batch_size, epochs_num)

    def nnet(self):
        """Small valid-padded CNN with a two-layer dense head."""
        input_X = Input(self.input_shape, name='InputX')

        conv1 = Conv2D(1, (3, 3), activation='relu')(input_X)
        pool1 = MaxPool2D((2, 2))(conv1)

        conv2 = Conv2D(3, (3, 3), activation='relu')(pool1)
        pool2 = MaxPool2D((2, 2))(conv2)

        conv3 = Conv2D(5, (3, 3), activation='relu')(pool2)
        pool3 = MaxPool2D((2, 2))(conv3)

        conv4 = Conv2D(8, (3, 3), activation='relu')(pool3)
        pool4 = MaxPool2D((2, 2))(conv4)

        flattened = Flatten()(pool4)
        dense1 = Dense(128, activation='relu')(flattened)
        droped = Dropout(0.5)(dense1)
        dense2 = Dense(64, activation='relu')(droped)

        output_Y = Dense(self.classes_num, activation='softmax')(dense2)

        return Model(inputs=input_X, outputs=output_Y)

    def findPaths(self):
        """Every file in the dataset directory is a sample; the rotation
        label is generated on the fly by the generator."""
        data_paths = glob(os.path.join(self.datasets_path, '*'))
        # Shrink the batch if the dataset is smaller than one batch.
        self.batch_size = min(self.batch_size, len(data_paths))
        steps = len(data_paths) // self.batch_size
        return data_paths, steps

    def formatImage(self, image):
        """Letterbox `image`, converted to grayscale, onto a white square
        uint8 canvas of the configured input size (aspect ratio preserved,
        pasted centered)."""
        src_size = np.array(image.size)
        dst_size = np.array([self.input_shape[0], self.input_shape[1]])

        if image.mode != 'L': image = image.convert('L')
        trans_array = np.zeros((self.input_shape[0], self.input_shape[1]), dtype='uint8') + 255

        k = dst_size / src_size.max()
        scale_size = np.around((k * src_size)).astype('int')
        trans_vect = ((dst_size - scale_size) / 2).astype('int')  # centering offset

        scale_image = image.resize(tuple(scale_size), resample=Image.BOX)
        scale_array = np.array(scale_image, dtype='uint8')
        # Reuse scale_array instead of converting the image a second time.
        trans_array[trans_vect[1]: trans_vect[1]+scale_size[1],
                    trans_vect[0]: trans_vect[0]+scale_size[0]] = scale_array

        return trans_array

    def generatorDataset(self, args):
        """Yield endless batches; each sample is rotated by a random number
        of quarter-turns, which is also its class label."""
        data_paths = args
        color_mode = 'RGB'
        if self.input_shape[-1] == 1: color_mode = 'L'

        while True:
            shuffle_index = np.random.permutation(len(data_paths))
            batch_count = shuffle_index.size // self.batch_size
            for batch_time in range(batch_count):
                batch_index = shuffle_index[batch_time * self.batch_size:
                                            (batch_time + 1) * self.batch_size]

                batch_datas, batch_labels = [], []
                for i in batch_index:
                    rot_time = np.random.randint(4)  # 0..3 quarter-turns
                    img_data = Image.open(data_paths[i]).convert(color_mode)
                    batch_datas.append(np.rot90(self.formatImage(img_data), k=rot_time))
                    batch_labels.append(rot_time)

                # Invert and normalize to [0, 1].
                batch_X = 1 - np.array(batch_datas, dtype='float32') / 255
                batch_X = batch_X.reshape((-1,) + self.input_shape)
                batch_Y = keras.utils.to_categorical(batch_labels, num_classes=self.classes_num)

                yield batch_X, batch_Y

    def saveModel(self):
        self.model.save(self.model_filename)

    def dumpDescription(self, step, steps_per_epoch, args):
        """Progress-bar text: current epoch plus the latest loss/accuracy."""
        description_frame = 'Epoch: {}, Loss = {:0,.4f}, Accuracy = {:0,.4f}'
        epoch = (step+1) // steps_per_epoch
        loss, accuracy = args
        return description_frame.format(epoch, loss, accuracy)

    def test(self, generator, steps):
        """Evaluate on `steps` generator batches; returns [loss, accuracy]."""
        evaluate = self.model.evaluate_generator(generator, steps)
        print('Evaluate Loss = {:0,.4f}, Accuracy = {:0,.4f}'.format(evaluate[0], evaluate[1]))
        return evaluate

if __name__ == '__main__':
    # Usage examples kept for reference: uncomment one line to train and
    # evaluate the corresponding model. run(time='HH:MM') trains step-by-step
    # until that wall-clock deadline instead of a fixed epoch count.
    # DocumentClassifier('../DocumentClassifier/datasets/', './doc_clf.h5').run()
    # LayoutClassifier('../LayoutClassifier/datasets/', './lay_clf.h5').run()
    # TypeClassifier('../TypeClassifier/datasets/', './type_clf.h5').run()
    # AreaDetect('../AreaDetect/test_data/营业执照-原图', '../AreaDetect/test_data/营业执照-标记',
    #            [(255, 0, 0)], 'business_license_ad.h5').run()
    # TextDeskew('../Preprocessing/datasets/', './deskew.h5').run()
    # DocumentClassifier('../DocumentClassifier/datasets/', './doc_clf2.h5', clean=True).run(time='13:08')
    # AreaDetect('../AreaDetect/test_data/营业执照-原图', '../AreaDetect/test_data/营业执照-标记',
    #         [(255, 0, 0)], 'business_license_ad2.h5').run(time='15:10')
    # TextDeskew('../Preprocessing/datasets/', './deskew2.h5').run(time='15:10')
    # DocumentClassifier('../DocumentClassifier/datasets/', './doc_clf3.h5', clean=True).run(time='18:30')
    # LayoutClassifier('../LayoutClassifier/datasets/', './lay_clf3.h5', clean=True).run(time='18:30')
    # TypeClassifier('../TypeClassifier/datasets/', './type_clf3.h5', epochs_num=10).run(time='16:40')
    # AreaDetect('../AreaDetect/test_data/business_license_data', '../AreaDetect/test_data/business_license_label',
    #         [(255, 0, 0)], 'business_license_ad3.h5', clean=True).run(time='18:30')
    # TextDeskew('../Preprocessing/datasets/', './deskew3.h5').run(time='18:50')
    pass
