# -*- coding: utf-8 -*-  
'''
解析: resources/conf.yml

@author: luoyi
Created on 2021-07-03
'''
import yaml
import os
import sys


#    Project root directory (every other relative path is joined onto this).
#    Derived by truncating this file's absolute path at the 'knowledge_graph'
#    segment, then re-appending it — so it works from any depth inside the repo.
ROOT_PATH = os.path.abspath(os.path.dirname(__file__)).split('knowledge_graph')[0]
ROOT_PATH = ROOT_PATH + "knowledge_graph"


#    Path of the configuration file (resources/conf.yml under the project root)
CONF_PATH = ROOT_PATH + "/resources/conf.yml"
#    Load the conf.yml configuration file
def load_conf_yaml(yaml_path=CONF_PATH):
    '''Parse the YAML configuration file and build one parser object per section.

    @param yaml_path: path of the YAML file to load (defaults to CONF_PATH)
    @return: tuple of (raw_config_dict, BertConfParser, Dataset_baiduConfParser,
             TplinkerConfParser, CrflinkerConfParser, TextConfParser)
    '''
    print('加载配置文件:' + yaml_path)
    #    Context manager guarantees the file handle is closed even on a parse
    #    error (the original opened the file and never closed it). safe_load
    #    accepts a stream directly, so no intermediate read() is needed.
    with open(yaml_path, 'r', encoding='utf-8') as f:
        c = yaml.safe_load(f)

    #    'bert' section: pre-training data generation + model hyper-parameters
    bert = BertConfParser(
                neg_prob=c['bert']['neg_prob'],
                rewrite_prob=c['bert']['rewrite_prob'],
                rewrite_max=c['bert']['rewrite_max'],
                rewrite_mask=c['bert']['rewrite_mask'],
                rewrite_original=c['bert']['rewrite_original'],
                rewrite_random=c['bert']['rewrite_random'],
                pre_training_tfrecord_train_path=c['bert']['pre_training_tfrecord_train_path'],
                pre_training_tfrecord_val_path=c['bert']['pre_training_tfrecord_val_path'],
                pre_training_tfrecord_limit=c['bert']['pre_training_tfrecord_limit'],
                pre_training_sentence_maxlen=c['bert']['pre_training_sentence_maxlen'],
                pre_training_max_sentences=c['bert']['pre_training_max_sentences'],
                batch_size=c['bert']['batch_size'],
                epochs=c['bert']['epochs'],
                shuffle_buffer_rate=c['bert']['shuffle_buffer_rate'],
                tfrecord_buffer_rate=c['bert']['tfrecord_buffer_rate'],
                learning_rate=c['bert']['learning_rate'],
                model_save_weights_path=c['bert']['model_save_weights_path'],
                tensorboard_dir_path=c['bert']['tensorboard_dir_path'],
                d_model=c['bert']['d_model'],
                n_head_attention=c['bert']['n_head_attention'],
                dropout_rate=c['bert']['dropout_rate'],
                f_model=c['bert']['f_model'],
                n_block=c['bert']['n_block'],
                lamud_loss_pre_nsp=c['bert']['lamud_loss_pre_nsp'],
                lamud_loss_pre_mlm=c['bert']['lamud_loss_pre_mlm'],
                )

    #    'dataset_baidu' section: source data files and tfrecord dataset layout
    dataset_baidu = Dataset_baiduConfParser(
                schemas_path=c['dataset_baidu']['schemas_path'],
                train_data_path=c['dataset_baidu']['train_data_path'],
                val_data_path=c['dataset_baidu']['val_data_path'],
                rel_id_path=c['dataset_baidu']['rel_id_path'],
                id_rel_path=c['dataset_baidu']['id_rel_path'],
                train_tplinker_dataset_path=c['dataset_baidu']['train_tplinker_dataset_path'],
                val_tplinker_dataset_path=c['dataset_baidu']['val_tplinker_dataset_path'],
                train_crflinker_dataset_path=c['dataset_baidu']['train_crflinker_dataset_path'],
                val_crflinker_dataset_path=c['dataset_baidu']['val_crflinker_dataset_path'],
                max_file_idx=c['dataset_baidu']['max_file_idx'],
                record_count=c['dataset_baidu']['record_count'],
                train_count=c['dataset_baidu']['train_count'],
                val_count=c['dataset_baidu']['val_count'],
                batch_size=c['dataset_baidu']['batch_size'],
                epochs=c['dataset_baidu']['epochs'],
                shuffle_buffer_rate=c['dataset_baidu']['shuffle_buffer_rate'],
                tfrecord_buffer_rate=c['dataset_baidu']['tfrecord_buffer_rate'],
                )

    #    'tplinker' section: tplinker model training parameters
    tplinker = TplinkerConfParser(
                max_sentence_len=c['tplinker']['max_sentence_len'],
                loss_lamda_ner=c['tplinker']['loss_lamda_ner'],
                loss_lamda_re=c['tplinker']['loss_lamda_re'],
                learning_rate=c['tplinker']['learning_rate'],
                tensorboard_dir_path=c['tplinker']['tensorboard_dir_path'],
                model_save_weights_path=c['tplinker']['model_save_weights_path'],
                )

    #    'crflinker' section: crflinker model training parameters
    crflinker = CrflinkerConfParser(
                max_sentence_len=c['crflinker']['max_sentence_len'],
                loss_lamda_ner=c['crflinker']['loss_lamda_ner'],
                loss_lamda_re=c['crflinker']['loss_lamda_re'],
                learning_rate=c['crflinker']['learning_rate'],
                tensorboard_dir_path=c['crflinker']['tensorboard_dir_path'],
                model_save_weights_path=c['crflinker']['model_save_weights_path'],
                )

    #    'text' section: dictionary file locations
    text = TextConfParser(
                dictionaries_chchars_path=c['text']['dictionaries_chchars_path'],
                dictionaries_word2dict_pkl_path=c['text']['dictionaries_word2dict_pkl_path'],
                dictionaries_dict2word_pkl_path=c['text']['dictionaries_dict2word_pkl_path'],
                )

    return c, bert, dataset_baidu, tplinker, crflinker, text



class BertConfParser:
    '''Value holder for the ``bert`` section of conf.yml.

    Every constructor argument is stored on the instance under a
    leading-underscore name and exposed via a matching ``get_*`` accessor.
    Path-valued settings are resolved to absolute paths (relative to the
    project root) via convert_to_abspath() at access time.
    '''

    def __init__(self,
                 neg_prob=0.5,
                 rewrite_prob=0.15,
                 rewrite_max=10,
                 rewrite_mask=0.8,
                 rewrite_original=0.1,
                 rewrite_random=0.1,
                 pre_training_tfrecord_train_path='temp/bert/pre_training/tfrecords/train',
                 pre_training_tfrecord_val_path='temp/bert/pre_training/tfrecords/val',
                 pre_training_tfrecord_limit=50000,
                 pre_training_sentence_maxlen=67,
                 pre_training_max_sentences=3,
                 batch_size=2,
                 epochs=10,
                 shuffle_buffer_rate=-1,
                 tfrecord_buffer_rate=8,
                 learning_rate=0.0001,
                 model_save_weights_path='temp/models/bert',
                 tensorboard_dir_path='logs/bert/tensorboard',
                 d_model=768,
                 n_head_attention=12,
                 dropout_rate=0.1,
                 f_model=3072,
                 n_block=12,
                 lamud_loss_pre_nsp=1,
                 lamud_loss_pre_mlm=1):
        #    Stash every argument on the instance as self._<name>
        #    (equivalent to 25 explicit assignments).
        for arg_name, arg_value in locals().items():
            if arg_name != 'self':
                setattr(self, '_' + arg_name, arg_value)

    # ---- plain value accessors ----
    def get_neg_prob(self):
        return self._neg_prob

    def get_rewrite_prob(self):
        return self._rewrite_prob

    def get_rewrite_max(self):
        return self._rewrite_max

    def get_rewrite_mask(self):
        return self._rewrite_mask

    def get_rewrite_original(self):
        return self._rewrite_original

    def get_rewrite_random(self):
        return self._rewrite_random

    def get_pre_training_tfrecord_limit(self):
        return self._pre_training_tfrecord_limit

    def get_pre_training_sentence_maxlen(self):
        return self._pre_training_sentence_maxlen

    def get_pre_training_max_sentences(self):
        return self._pre_training_max_sentences

    def get_batch_size(self):
        return self._batch_size

    def get_epochs(self):
        return self._epochs

    def get_shuffle_buffer_rate(self):
        return self._shuffle_buffer_rate

    def get_tfrecord_buffer_rate(self):
        return self._tfrecord_buffer_rate

    def get_learning_rate(self):
        return self._learning_rate

    def get_d_model(self):
        return self._d_model

    def get_n_head_attention(self):
        return self._n_head_attention

    def get_dropout_rate(self):
        return self._dropout_rate

    def get_f_model(self):
        return self._f_model

    def get_n_block(self):
        return self._n_block

    def get_lamud_loss_pre_nsp(self):
        return self._lamud_loss_pre_nsp

    def get_lamud_loss_pre_mlm(self):
        return self._lamud_loss_pre_mlm

    # ---- path accessors (resolved against the project root) ----
    def get_pre_training_tfrecord_train_path(self):
        return convert_to_abspath(self._pre_training_tfrecord_train_path)

    def get_pre_training_tfrecord_val_path(self):
        return convert_to_abspath(self._pre_training_tfrecord_val_path)

    def get_model_save_weights_path(self):
        return convert_to_abspath(self._model_save_weights_path)

    def get_tensorboard_dir_path(self):
        return convert_to_abspath(self._tensorboard_dir_path)

class Dataset_baiduConfParser:
    '''Value holder for the ``dataset_baidu`` section of conf.yml.

    Arguments are stored on the instance under leading-underscore names and
    exposed via ``get_*`` accessors; path-valued settings are resolved to
    absolute paths via convert_to_abspath() at access time.
    '''

    def __init__(self,
                 schemas_path='temp/data/baidu/original/all_50_schemas',
                 train_data_path='temp/data/baidu/original/train_data.json',
                 val_data_path='temp/data/baidu/original/dev_data.json',
                 rel_id_path='temp/data/baidu/relationships/rel_id.pkl',
                 id_rel_path='temp/data/baidu/relationships/id_rel.pkl',
                 train_tplinker_dataset_path='temp/data/baidu/tfrecord/train/train_tplinker_{}.tfrecord',
                 val_tplinker_dataset_path='temp/data/baidu/tfrecord/val/val_tplinker_{}.tfrecord',
                 train_crflinker_dataset_path='temp/data/baidu/tfrecord/train/train_crflinker_{}.tfrecord',
                 val_crflinker_dataset_path='temp/data/baidu/tfrecord/val/val_crflinker_{}.tfrecord',
                 max_file_idx=0,
                 record_count=8192,
                 train_count=100,
                 val_count=128,
                 batch_size=2,
                 epochs=10,
                 shuffle_buffer_rate=-1,
                 tfrecord_buffer_rate=8):
        #    Stash every argument on the instance as self._<name>.
        for arg_name, arg_value in locals().items():
            if arg_name != 'self':
                setattr(self, '_' + arg_name, arg_value)

    # ---- path accessors (resolved against the project root) ----
    def get_schemas_path(self):
        return convert_to_abspath(self._schemas_path)

    def get_train_data_path(self):
        return convert_to_abspath(self._train_data_path)

    def get_val_data_path(self):
        return convert_to_abspath(self._val_data_path)

    def get_rel_id_path(self):
        return convert_to_abspath(self._rel_id_path)

    def get_id_rel_path(self):
        return convert_to_abspath(self._id_rel_path)

    def get_train_tplinker_dataset_path(self):
        return convert_to_abspath(self._train_tplinker_dataset_path)

    def get_val_tplinker_dataset_path(self):
        return convert_to_abspath(self._val_tplinker_dataset_path)

    def get_train_crflinker_dataset_path(self):
        return convert_to_abspath(self._train_crflinker_dataset_path)

    def get_val_crflinker_dataset_path(self):
        return convert_to_abspath(self._val_crflinker_dataset_path)

    # ---- plain value accessors ----
    def get_max_file_idx(self):
        return self._max_file_idx

    def get_record_count(self):
        return self._record_count

    def get_train_count(self):
        return self._train_count

    def get_val_count(self):
        return self._val_count

    def get_batch_size(self):
        return self._batch_size

    def get_epochs(self):
        return self._epochs

    def get_shuffle_buffer_rate(self):
        return self._shuffle_buffer_rate

    def get_tfrecord_buffer_rate(self):
        return self._tfrecord_buffer_rate

class TplinkerConfParser:
    '''Value holder for the ``tplinker`` section of conf.yml.

    Arguments are stored on the instance under leading-underscore names;
    path-valued settings are resolved via convert_to_abspath() on access.
    '''

    def __init__(self,
                 max_sentence_len=128,
                 loss_lamda_ner=1,
                 loss_lamda_re=1,
                 learning_rate=0.001,
                 tensorboard_dir_path='logs/tplinker/tensorboard',
                 model_save_weights_path='temp/models/tplinker'):
        #    Stash every argument on the instance as self._<name>.
        for arg_name, arg_value in locals().items():
            if arg_name != 'self':
                setattr(self, '_' + arg_name, arg_value)

    def get_max_sentence_len(self):
        return self._max_sentence_len

    def get_loss_lamda_ner(self):
        return self._loss_lamda_ner

    def get_loss_lamda_re(self):
        return self._loss_lamda_re

    def get_learning_rate(self):
        return self._learning_rate

    # ---- path accessors (resolved against the project root) ----
    def get_tensorboard_dir_path(self):
        return convert_to_abspath(self._tensorboard_dir_path)

    def get_model_save_weights_path(self):
        return convert_to_abspath(self._model_save_weights_path)

class CrflinkerConfParser:
    '''Value holder for the ``crflinker`` section of conf.yml.

    Arguments are stored on the instance under leading-underscore names;
    path-valued settings are resolved via convert_to_abspath() on access.
    '''

    def __init__(self,
                 max_sentence_len=128,
                 loss_lamda_ner=1,
                 loss_lamda_re=1,
                 learning_rate=0.001,
                 tensorboard_dir_path='logs/crflinker/tensorboard',
                 model_save_weights_path='temp/models/crflinker'):
        #    Stash every argument on the instance as self._<name>.
        for arg_name, arg_value in locals().items():
            if arg_name != 'self':
                setattr(self, '_' + arg_name, arg_value)

    def get_max_sentence_len(self):
        return self._max_sentence_len

    def get_loss_lamda_ner(self):
        return self._loss_lamda_ner

    def get_loss_lamda_re(self):
        return self._loss_lamda_re

    def get_learning_rate(self):
        return self._learning_rate

    # ---- path accessors (resolved against the project root) ----
    def get_tensorboard_dir_path(self):
        return convert_to_abspath(self._tensorboard_dir_path)

    def get_model_save_weights_path(self):
        return convert_to_abspath(self._model_save_weights_path)

class TextConfParser:
    '''Value holder for the ``text`` section of conf.yml (dictionary files).

    All three settings are paths; they are resolved to absolute paths via
    convert_to_abspath() on access.
    '''

    def __init__(self,
                 dictionaries_chchars_path='temp/dictionaries/chchars_strokes.txt',
                 dictionaries_word2dict_pkl_path='temp/dictionaries/word2dict.pkl',
                 dictionaries_dict2word_pkl_path='temp/dictionaries/dict2word.pkl'):
        #    Stash every argument on the instance as self._<name>.
        for arg_name, arg_value in locals().items():
            if arg_name != 'self':
                setattr(self, '_' + arg_name, arg_value)

    def get_dictionaries_chchars_path(self):
        return convert_to_abspath(self._dictionaries_chchars_path)

    def get_dictionaries_word2dict_pkl_path(self):
        return convert_to_abspath(self._dictionaries_word2dict_pkl_path)

    def get_dictionaries_dict2word_pkl_path(self):
        return convert_to_abspath(self._dictionaries_dict2word_pkl_path)



#    Resolve a configured path to an absolute path
def convert_to_abspath(path):
    '''Resolve a configured path to an absolute path.

        Absolute paths are returned unchanged; relative paths have the
        project root (ROOT_PATH) prepended.
        @param path: path string from the configuration
        @return: absolute path string
    '''
    # os.path.isabs also recognizes platform-specific absolute forms
    # (e.g. drive-rooted paths on Windows), unlike the original
    # startswith('/') check; on POSIX it behaves identically.
    if os.path.isabs(path):
        return path
    return ROOT_PATH + "/" + path
    
#    Create a file's parent directory if it does not exist
def mkfiledir_ifnot_exises(filepath):
    '''Create the parent directory of a file if it does not already exist.
        @param filepath: path of the file whose parent directory must exist
    '''
    _dir = os.path.dirname(filepath)
    # A bare filename has no directory component; the original would call
    # os.makedirs('') and raise. Skip creation in that case.
    if _dir:
        # exist_ok avoids the check-then-create race of the original
        # exists()/makedirs() pair.
        os.makedirs(_dir, exist_ok=True)
#    Create a directory if it does not exist
def mkdir_ifnot_exises(_dir):
    '''Create a directory (and any missing parents) if it does not exist.
        @param _dir: directory path to create
    '''
    # exist_ok avoids the check-then-create race of the original
    # exists()/makedirs() pair.
    os.makedirs(_dir, exist_ok=True)


#    Write configuration to a file
def write_conf(_dict, file_path):
    '''Dump a configuration dictionary to a YAML file.
        @param _dict: configuration dictionary to write
        @param file_path: target file path (relative paths are resolved
                          against the project root)
    '''
    file_path = convert_to_abspath(file_path)
    mkfiledir_ifnot_exises(file_path)

    #    Remove any existing file of the same name first
    if os.path.exists(file_path):
        os.remove(file_path)

    # Context manager guarantees the handle is closed even if safe_dump
    # raises (the original leaked it on error).
    with open(file_path, mode='w', encoding='utf-8') as fw:
        yaml.safe_dump(_dict, fw)


#    Append a path to sys.path
def append_sys_path(path):
    '''Resolve a configured path against the project root and append it to
    sys.path, then print the resulting search path for diagnostics.
    '''
    resolved = convert_to_abspath(path)
    sys.path.append(resolved)
    print(sys.path)


#	Module-level configuration objects exposed for external access
#	(the configuration file is parsed once, at import time).
ALL_DICT, BERT, DATASET_BAIDU, TPLINKER, CRFLINKER, TEXT = load_conf_yaml()


