# -*- coding: utf-8 -*-  
'''
Joint training script for the CRF + TPLinker linker model (crflinker).

@author: luoyi
Created on 2021-07-05
'''
import glob
import os
import sys
#    Resolve the project root: everything up to and including the 'knowledge_graph'
#    directory, then put it on sys.path so the project-absolute imports below resolve.
_this_dir = os.path.abspath(os.path.dirname(__file__))
ROOT_PATH = _this_dir.split('knowledge_graph')[0] + "knowledge_graph"
sys.path.append(ROOT_PATH)

import utils.conf as conf
import utils.relationships as rel
import utils.dictionaries as dicts
import utils.poses as poses
import data.dataset_tfrecord_baidu as dstf_baidu
from models.crflinker.net import CRFLinker


#    Load the token dictionary from its pickle file (populates the dicts module's
#    state; dicts.dict_size() is read below when building the model).
dicts.load_dict_from_pkl()
#    Load the relation<->id mapping from its pickle file (populates rel.id_rel,
#    whose length is used below as the relation-set size).
rel.load_rel_id_from_pkl()


#    CRFLinker TFRecord reader; max sentence length and relation count are fixed at construction.
reader = dstf_baidu.CRFLinkerTFRecordReader(max_sen_len=conf.CRFLINKER.get_max_sentence_len(),
                                            rel_size=len(rel.id_rel))

#    Baidu train/validation bookkeeping used to derive steps_per_epoch.
total = 167608                                                                                          #    total record count of the full dataset (one complete pass)
batch_size = conf.DATASET_BAIDU.get_batch_size()
max_file_idx = conf.DATASET_BAIDU.get_max_file_idx()                                                    #    how many tfrecord files to take; < 0 means take them all
tplinker_dataset_path=conf.DATASET_BAIDU.get_train_tplinker_dataset_path()                              #    path template of the tplinker tfrecord files
if (max_file_idx < 0): files = glob.glob(os.path.dirname(tplinker_dataset_path) + '/*.tfrecord')
else: files = [tplinker_dataset_path.format(i) for i in range(max_file_idx + 1)]
#    NOTE(review): `files` is never referenced below — db_train/db_val are built from the
#    *crflinker* dataset path instead. Confirm whether this tplinker file list is dead code.
count_file = conf.DATASET_BAIDU.get_record_count()                                                      #    records per tfrecord file
#    Samples per epoch: the full dataset when all files are taken, otherwise
#    (file count) * (records per file) — assumes every selected file is full.
sample_total = total if (max_file_idx < 0) else (max_file_idx + 1) * count_file
steps_per_epoch = sample_total // batch_size                                                            #    training steps per epoch

#    Training set
db_train = reader.tensor_db(dataset_path=conf.DATASET_BAIDU.get_train_crflinker_dataset_path(), max_file_idx=max_file_idx,
                            batch_size=batch_size, 
                            epochs=conf.DATASET_BAIDU.get_epochs(), 
                            shuffle_buffer_rate=conf.DATASET_BAIDU.get_shuffle_buffer_rate())

#    Validation set
#    NOTE(review): built from the same *train* crflinker path with parameters identical to
#    db_train, so validation effectively runs on the training data — confirm whether a
#    separate validation-path getter is intended here.
db_val = reader.tensor_db(dataset_path=conf.DATASET_BAIDU.get_train_crflinker_dataset_path(), max_file_idx=max_file_idx,
                          batch_size=batch_size, 
                          epochs=conf.DATASET_BAIDU.get_epochs(), 
                          shuffle_buffer_rate=conf.DATASET_BAIDU.get_shuffle_buffer_rate())


#    Build the joint model: a BERT-style encoder with a CRF NER head and a
#    relation-extraction head, weighted by the two loss lambdas below.
crf_linker = CRFLinker(name='crf_linker', 
                       #    BERT encoder configuration
                       vocab_size=dicts.dict_size(), 
                       n_block=conf.BERT.get_n_block(), 
                       n_head=conf.BERT.get_n_head_attention(), 
                       d_model=conf.BERT.get_d_model(), 
                       f_model=conf.BERT.get_f_model(), 
                       dropout_rate=conf.BERT.get_dropout_rate(), 
                       #    crflinker-specific configuration
                       max_sen_len=conf.CRFLINKER.get_max_sentence_len(), 
                       rel_size=len(rel.id_rel), 
                       pos_size=poses.bio_tag_size(), 
                       #    training configuration
                       batch_size=conf.DATASET_BAIDU.get_batch_size(), 
                       loss_lamda_ner=conf.CRFLINKER.get_loss_lamda_ner(),               #    weight of the NER loss term
                       loss_lamda_re=conf.CRFLINKER.get_loss_lamda_re(),                 #    weight of the relation-extraction loss term
                       learning_rate=conf.CRFLINKER.get_learning_rate(), 
                       #    NOTE(review): input shape (None, 2, max_sen_len) — presumably
                       #    (batch, two parallel sequences, seq_len); confirm against the reader's output.
                       input_shape=(None, 2, conf.CRFLINKER.get_max_sentence_len()), 
                       auto_assembling=True, 
                       is_build=True)
#    Print the model summary.
crf_linker.show_info()

#    Feed the datasets to the model and run training, with auto checkpointing,
#    learning-rate scheduling, and TensorBoard logging enabled.
crf_linker.train_tensor_db(db_train, db_val, 
                           steps_per_epoch=steps_per_epoch, 
                           batch_size=batch_size, 
                           epochs=conf.DATASET_BAIDU.get_epochs(), 
                           #    NOTE(review): 'traind' is a typo in the project API's keyword
                           #    name; it must stay as-is here to match the callee's signature.
                           auto_save_weights_after_traind=True, auto_save_weights_dir=conf.CRFLINKER.get_model_save_weights_path(), 
                           auto_learning_rate_schedule=True, 
                           auto_tensorboard=True, auto_tensorboard_dir=conf.CRFLINKER.get_tensorboard_dir_path())





