# -*- coding: utf-8 -*-  
'''
Train the transformer network.


@author: luoyi
Created on 2021-03-22
'''
import sys
import os
from math import floor
#    Locate the project root: everything up to the first 'couplet' path
#    segment, with 'couplet' re-appended, then put it on sys.path so the
#    project-local packages (utils, data, models) are importable.
ROOT_PATH = os.path.abspath(os.path.dirname(__file__)).split('couplet')[0] + "couplet"
sys.path.append(ROOT_PATH)

import utils.logger_factory as logf
import utils.conf as conf
import utils.dictionaries as dictionaries
import data.dataset as ds_couplet
from models.transformer.preprocess import x_preprocess, y_preprocess
from models.transformer.nets import TransformerModel


#    Module-level logger for this training script.
log = logf.get_logger('transformer_train')


#    Load the vocabulary dictionary from file; done up front because the
#    model construction below reads dictionaries.dict_size().
dictionaries.load_dict_from_file()


#    Build the training / validation dataset pipelines.
count_train = conf.DATASET.get_count_train()
batch_size = conf.DATASET.get_batch_size()
#    Full batches per epoch; floor division drops the last partial batch.
steps_per_epoch = count_train // batch_size
epochs = conf.DATASET.get_epochs()
#    Hoisted: the same max sentence length is used by both pipelines.
sentence_maxlen = conf.TRANSFORMER.get_sentence_maxlen()
db_train = ds_couplet.tensor_db(in_path=conf.DATASET.get_in_train(),
                                out_path=conf.DATASET.get_label_train(),
                                count=count_train,
                                sentence_maxlen=sentence_maxlen,
                                x_preprocess=x_preprocess,
                                y_preprocess=y_preprocess,
                                batch_size=batch_size,
                                epochs=epochs,
                                shuffle_buffer_rate=conf.DATASET.get_shuffle_buffer_rate())
#    Validation pipeline reuses the cached batch_size / epochs values so it
#    cannot drift from the training pipeline's configuration.
db_val = ds_couplet.tensor_db(in_path=conf.DATASET.get_in_val(),
                              out_path=conf.DATASET.get_label_val(),
                              count=conf.DATASET.get_count_val(),
                              sentence_maxlen=sentence_maxlen,
                              x_preprocess=x_preprocess,
                              y_preprocess=y_preprocess,
                              batch_size=batch_size,
                              epochs=epochs,
                              shuffle_buffer_rate=conf.DATASET.get_shuffle_buffer_rate())
log.info('init db_train, db_val... ')


#    Build the transformer model from config.
#    Hoisted: sentence_maxlen is needed both as a constructor argument and
#    inside input_shape; batch_size reuses the value cached above from conf.
sentence_len = conf.TRANSFORMER.get_sentence_maxlen()
transformer = TransformerModel(mutil_attention_groups=conf.TRANSFORMER.get_mutil_head_attention_groups(),
                               sentence_maxlen=sentence_len,
                               embedding_dims=conf.TRANSFORMER.get_embedding_dims(),
                               self_attention_dims=conf.TRANSFORMER.get_self_attention_dims(),
                               feed_forward_dims=conf.TRANSFORMER.get_feed_forward_dims(),
                               dict_size=dictionaries.dict_size(),
                               batch_size=batch_size,
                               #    (None, 2, maxlen): batch dim unknown; 2 presumably
                               #    the (input, label) pair axis — TODO confirm in nets.py
                               input_shape=(None, 2, sentence_len))
log.info('init model transformer...')
transformer.show_info()

#    Run training: feed the train/val pipelines to the model, saving weights
#    and tensorboard logs to the configured directories.
transformer.train_tensor_db(db_train=db_train, 
                            db_val=db_val, 
                            steps_per_epoch=steps_per_epoch, 
                            batch_size=batch_size, 
                            epochs=epochs, 
                            #    NOTE(review): 'traind' looks like a typo for 'trained'
                            #    in the model API; kept as-is to match the callee.
                            auto_save_weights_after_traind=True, 
                            auto_save_weights_dir=conf.TRANSFORMER.get_model_save_weights(), 
                            auto_learning_rate_schedule=True, 
                            auto_tensorboard=True, 
                            auto_tensorboard_dir=conf.TRANSFORMER.get_tensorboard_dir())


