#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2020/4/10 17:30 
@Author : SPZ
@File : train.py
@Software: pycharm
"""

import os
import json

try:
    import moxing as mox
    import moxing.framework.file as file
except Exception as info:
    print(info)
import tensorflow as tf
import keras.backend as K
from keras.callbacks import EarlyStopping, TensorBoard

from train_lib.lib.custom import LRScheduler, SingleModelCK
from train_lib.core import DenseNetOCR
from train_lib.lib.data_loader import DataLoader
from train_lib.lib import utils

# Command-line hyper-parameters and data/output locations (TF1 flags API).
# NOTE(review): several help strings were copy-pasted and misleading; corrected
# batch_size / max_epochs / steps_per_epoch descriptions below.
tf.flags.DEFINE_integer('batch_size', 8, 'number of samples per training batch.')
tf.flags.DEFINE_integer('max_epochs', 2, 'max number of training epochs.')
tf.flags.DEFINE_integer('num_gpus', 1, 'gpu nums.')
tf.flags.DEFINE_integer('num_classes', 37, 'num_classes.')
tf.flags.DEFINE_integer('steps_per_epoch', 10, 'steps per epoch.')
tf.flags.DEFINE_string('data_url', './data', 'dataset directory.')
tf.flags.DEFINE_string('dict_url', './digitEn_37.txt', 'dictionary directory.')
tf.flags.DEFINE_string('train_file_url', './data/train_labels.txt', 'train labels file.')
tf.flags.DEFINE_string('val_file_url', './data/val_labels.txt', 'val labels file.')
tf.flags.DEFINE_string('train_url', './output', 'saved model directory.')
tf.flags.DEFINE_float('learning_rate', 0.05, 'learning rate.')
tf.flags.DEFINE_bool('local_debug', True, 'local debug.')
args = tf.flags.FLAGS

if not args.local_debug:
    # Cloud run: stage input files from OBS onto the local cache disk, and
    # collect all outputs under /cache/output before copying them back.
    local_dict_path = '/cache/dict.txt'
    local_train_path = '/cache/train.txt'
    local_val_path = '/cache/val.txt'
    local_output_path = '/cache/output/'
    model_output_path = os.path.join(local_output_path, "model")
    # exist_ok=True is idempotent and avoids the check-then-create race.
    os.makedirs(local_output_path, exist_ok=True)

    mox.file.copy(args.dict_url, local_dict_path)
    mox.file.copy(args.train_file_url, local_train_path)
    mox.file.copy(args.val_file_url, local_val_path)

    if not mox.file.exists(args.data_url):
        # Fixed message typo: "dose not exists" -> "does not exist".
        print(f'{args.data_url} does not exist!!')
else:
    # Local debug: force CPU training (remove this env var to use the GPU).
    os.environ["CUDA_VISIBLE_DEVICES"] = '-1'
    local_dict_path = args.dict_url
    local_train_path = args.train_file_url
    local_val_path = args.val_file_url
    local_output_path = args.train_url
    os.makedirs(local_output_path, exist_ok=True)
    model_output_path = os.path.join(local_output_path, 'pb_model')


def save_keras_model_to_serving(model, export_path):
    """Export a Keras model to the TF SavedModel format for serving.

    The model's first input tensor is exposed as 'images' and its first
    output tensor as 'logits', under a signature named 'segmentation'.

    :param model: compiled Keras model whose current session holds the weights
    :param export_path: directory the SavedModel is written to
    """
    serving_signature = tf.saved_model.signature_def_utils.predict_signature_def(
        inputs={'images': model.inputs[0]},
        outputs={'logits': model.outputs[0]})

    # Initialize any lookup tables when the model is loaded for serving.
    init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')

    saved_model_builder = tf.saved_model.builder.SavedModelBuilder(export_path)
    saved_model_builder.add_meta_graph_and_variables(
        sess=K.get_session(),
        tags=[tf.saved_model.tag_constants.SERVING],
        signature_def_map={'segmentation': serving_signature},
        legacy_init_op=init_op)
    saved_model_builder.save()


if __name__ == '__main__':
    # Cap GPU memory usage for this session at 90%.
    K.set_session(utils.get_session(0.9))

    # Training hyper-parameters, all sourced from the command-line flags.
    batch_size = args.batch_size
    max_epochs = args.max_epochs

    # Dataset locations (local_*_path variables were resolved above,
    # depending on cloud vs. local-debug mode).
    images_dir = args.data_url
    encoding = "UTF-8"

    # Build the OCR network.
    ocr = DenseNetOCR(num_gpu=args.num_gpus,
                      num_classes=args.num_classes,
                      lr=args.learning_rate)

    save_weights_file_path = os.path.join(local_output_path, 'weights-{epoch:02d}.hdf5')

    # Data generators for training and validation.
    train_data_loader = DataLoader(images_dir=images_dir,
                                   dict_file_path=local_dict_path,
                                   labeled_file_path=local_train_path,
                                   image_shape=(32, 280),
                                   encoding=encoding,
                                   batch_size=batch_size,
                                   steps_per_epoch=args.steps_per_epoch)

    valid_data_loader = DataLoader(images_dir=images_dir,
                                   dict_file_path=local_dict_path,
                                   labeled_file_path=local_val_path,
                                   image_shape=(32, 280),
                                   encoding=encoding,
                                   batch_size=batch_size,
                                   steps_per_epoch=10)

    # Callbacks: early stopping, per-epoch checkpoints, TensorBoard logging,
    # and a loss-watching learning-rate schedule.
    callbacks = [
        EarlyStopping(patience=10),
        SingleModelCK(save_weights_file_path,
                      model=ocr.model,
                      save_weights_only=True),
        TensorBoard(log_dir='logs', histogram_freq=0,
                    batch_size=train_data_loader.batch_size,
                    write_graph=True,
                    write_grads=False),
        # Watch the CTC loss; halve the learning rate whenever it rises.
        LRScheduler(lambda _, lr: lr / 2, watch="loss", watch_his_len=2),
    ]

    ocr.train(epochs=max_epochs,
              train_data_loader=train_data_loader,
              valid_data_loader=valid_data_loader,
              callbacks=callbacks,
              initial_epoch=0)

    # Export the trained model in SavedModel format for serving.
    save_keras_model_to_serving(model=ocr.model, export_path=model_output_path)

    # Best-effort copy of all outputs back to OBS; fails harmlessly when
    # moxing is unavailable (local debug).
    try:
        file.copy_parallel(local_output_path, args.train_url)
    except Exception as info:
        print(info)
