# -*- coding: utf-8 -*-
'''
Train the GPT2.0 model.

@author: luoyi
Created on 2021-04-08
'''
import sys
import os

#    Locate the project root: everything up to (and including) the 'couplet'
#    directory of this file's absolute path.
#    NOTE(review): this breaks if a parent directory is also named 'couplet'
#    — confirm against the deployment layout.
ROOT_PATH = os.path.abspath(os.path.dirname(__file__)).split('couplet')[0]
ROOT_PATH = ROOT_PATH + "couplet"
#    Guard so repeated imports of this module don't keep appending
#    duplicate entries to sys.path.
if ROOT_PATH not in sys.path:
    sys.path.append(ROOT_PATH)

import utils.conf as conf
import utils.dictionaries as dictionaries
import utils.logger_factory as logf
import data.dataset_cpt2 as ds_cpt2
from models.gpt2.nets import Gpt2Model
from models.gpt2.preprocess import pre_embedding_weights
from math import floor


#    Initialize the logger for this training run
log = logf.get_logger('gpt2_train')

#    Load the word/character dictionary from file
dictionaries.load_dict_from_file()

#    Load the pre-trained word-embedding weights.
#    NOTE(review): appears to depend on the dictionary being loaded first
#    (embedding rows indexed by dictionary ids) — confirm in preprocess.
embedding_weights = pre_embedding_weights()


#    Sample count recorded when the tfrecord files were generated
count = conf.DATASET.get_count_train()
batch_size = conf.DATASET.get_batch_size()
#    Steps per epoch drops the final partial batch (integer floor division;
#    assumes the config getters return ints — TODO confirm).
steps_per_epoch = count // batch_size
epochs = conf.DATASET.get_epochs()

#    Load the train/validation datasets.
def _build_db(inputs_path, labels_path, db_count):
    """Build one pre-training tensor dataset with the shared GPT2 settings.

    Only the input/label paths and the sample count differ between the
    train and validation sets; every other argument comes from config.
    """
    return ds_cpt2.pre_tensor_db(inputs_path=inputs_path,
                                 labels_path=labels_path,
                                 count=db_count,
                                 sentence_maxlen=conf.GPT2.get_pre_training_sentence_maxlen(),
                                 batch_size=batch_size,
                                 epochs=epochs,
                                 shuffle_buffer_rate=conf.GPT2.get_shuffle_buffer_rate())

db_train = _build_db(conf.DATASET.get_in_train(),
                     conf.DATASET.get_label_train(),
                     count)
db_val = _build_db(conf.DATASET.get_in_val(),
                   conf.DATASET.get_label_val(),
                   conf.DATASET.get_count_val())
log.info('init dataset finished. train.count:{} epochs:{}'.format(count, epochs))


#    Build the GPT2 model from configuration.
#    Hoist the repeated maxlen getter into a local so both uses agree.
_sentence_maxlen = conf.GPT2.get_pre_training_sentence_maxlen()
_model_kwargs = dict(name='gpt2_model',
                     batch_size=batch_size,
                     n_head_attention=conf.GPT2.get_n_head_attention(),
                     sentence_maxlen=_sentence_maxlen,
                     f_model=conf.GPT2.get_f_model(),
                     d_model=conf.GPT2.get_d_model(),
                     dropout_rate=conf.GPT2.get_dropout_rate(),
                     learning_rate=conf.GPT2.get_learning_rate(),
                     n_block=conf.GPT2.get_n_block(),
                     vocab_size=dictionaries.dict_size(),
                     input_shape=(None, _sentence_maxlen),
                     pre_embedding_weights=embedding_weights)
gpt2_model = Gpt2Model(**_model_kwargs)
log.info('init model finished.')
gpt2_model.show_info()


#    Run training on the prepared train/validation datasets, saving weights
#    after training and logging to TensorBoard per the config directories.
#    NOTE(review): 'auto_save_weights_after_traind' is a typo ('traind' ->
#    'trained') in the callee's parameter name; fixing it requires changing
#    Gpt2Model.train_tensor_db as well.
gpt2_model.train_tensor_db(db_train, db_val, 
                           steps_per_epoch, 
                           batch_size, 
                           epochs, 
                           auto_save_weights_after_traind=True, 
                           auto_save_weights_dir=conf.GPT2.get_model_save_weights(), 
                           auto_learning_rate_schedule=True, 
                           auto_tensorboard=True, 
                           auto_tensorboard_dir=conf.GPT2.get_tensorboard_dir())




