# -*- coding: utf-8 -*-  
'''
Configuration component: loads resources/conf.yml and exposes typed
config-holder objects (Dataset, Text, Transformer, Gpt2, Bert).
(The original header said "logging component", which does not match this
module's actual content.)

@author: luoyi
Created on 2021-03-02
'''
import yaml
import os
import sys


#    Project root directory (every other relative path is joined onto it).
#    NOTE(review): this assumes the file lives under a directory literally
#    named 'couplet'; it breaks if 'couplet' appears earlier in the path —
#    confirm against the deployment layout.
ROOT_PATH = os.path.abspath(os.path.dirname(__file__)).split('couplet')[0]
ROOT_PATH = ROOT_PATH + "couplet"


#    Location of the configuration file
CONF_PATH = ROOT_PATH + "/resources/conf.yml"
#    Load the conf.yml configuration file
def load_conf_yaml(yaml_path=CONF_PATH):
    '''Load a YAML configuration file and build the typed config objects.
        @param yaml_path: path of the yaml file (defaults to CONF_PATH)
        @return: (raw_config_dict, Dataset, Text, Transformer, Gpt2, Bert)
    '''
    #    BUGFIX: log the path actually being loaded (was hard-coded CONF_PATH,
    #    which lied whenever a caller passed a different yaml_path)
    print('加载配置文件:' + yaml_path)
    #    `with` guarantees the handle is closed even if parsing raises
    #    (the original opened the file and never closed it)
    with open(yaml_path, 'r', encoding='utf-8') as f:
        #    safe_load: never instantiate arbitrary Python objects from YAML
        c = yaml.safe_load(f)

    #    dataset section
    dataset = Dataset(c['dataset']['in_train'], c['dataset']['count_train'], c['dataset']['label_train'],
                      c['dataset']['in_val'], c['dataset']['count_val'], c['dataset']['label_val'],
                      c['dataset']['in_test'], c['dataset']['count_test'], c['dataset']['label_test'],
                      c['dataset']['batch_size'],
                      c['dataset']['epochs'],
                      c['dataset']['shuffle_buffer_rate'])

    #    transformer section
    transformer = Transformer(c['transformer']['num_layers'],
                              c['transformer']['embedding_dims'],
                              c['transformer']['sentence_maxlen'],
                              c['transformer']['self_attention_dims'],
                              c['transformer']['feed_forward_dims'],
                              c['transformer']['mutil_head_attention_groups'],
                              c['transformer']['attention_dropout'],
                              c['transformer']['learning_rate'],
                              c['transformer']['model_save_weights'],
                              c['transformer']['tensorboard_dir'])

    #    gpt2 section
    gpt2 = Gpt2(c['gpt2']['pre_training_tfrecord_train'],
                c['gpt2']['pre_training_tfrecord_val'],
                c['gpt2']['pre_training_tfrecord_limit'],
                c['gpt2']['pre_training_sentence_maxlen'],
                c['gpt2']['batch_size'],
                c['gpt2']['epochs'],
                c['gpt2']['shuffle_buffer_rate'],
                c['gpt2']['tfrecord_buffer_rate'],
                c['gpt2']['d_model'],
                c['gpt2']['n_head_attention'],
                c['gpt2']['dropout_rate'],
                c['gpt2']['f_model'],
                c['gpt2']['n_block'],
                c['gpt2']['learning_rate'],
                c['gpt2']['model_save_weights'],
                c['gpt2']['tensorboard_dir'],
                c['gpt2']['embedding_weights_path'],
                c['gpt2']['embedding_trainable'])

    #    bert section
    bert = Bert(neg_prob=c['bert']['neg_prob'],
                rewrite_prob=c['bert']['rewrite_prob'],
                rewrite_max=c['bert']['rewrite_max'],
                rewrite_mask=c['bert']['rewrite_mask'],
                rewrite_original=c['bert']['rewrite_original'],
                rewrite_random=c['bert']['rewrite_random'],

                pre_training_tfrecord_train=c['bert']['pre_training_tfrecord_train'],
                pre_training_tfrecord_val=c['bert']['pre_training_tfrecord_val'],
                pre_training_tfrecord_limit=c['bert']['pre_training_tfrecord_limit'],
                pre_training_sentence_maxlen=c['bert']['pre_training_sentence_maxlen'],
                pre_training_max_sentences=c['bert']['pre_training_max_sentences'],

                batch_size=c['bert']['batch_size'],
                epochs=c['bert']['epochs'],
                shuffle_buffer_rate=c['bert']['shuffle_buffer_rate'],
                learning_rate=c['bert']['learning_rate'],
                model_save_weights=c['bert']['model_save_weights'],
                tensorboard_dir=c['bert']['tensorboard_dir'],

                d_model=c['bert']['d_model'],
                n_head_attention=c['bert']['n_head_attention'],
                dropout_rate=c['bert']['dropout_rate'],
                f_model=c['bert']['f_model'],
                n_block=c['bert']['n_block'],
                lamud_loss_pre_nsp=c['bert']['lamud_loss_pre_nsp'],
                lamud_loss_pre_mlm=c['bert']['lamud_loss_pre_mlm']
                )

    #    text section
    text = Text(c['text']['max_len'],
                c['text']['thesaurus'],
                c['text']['dictionaries_path'])

    return c, dataset, text, transformer, gpt2, bert


#    Captcha-recognition dataset configuration. Java-bean getter style kept
#    for consistency with the rest of the file.
class Dataset:
    '''Dataset section of conf.yml.
        Paths are stored exactly as configured and converted to absolute
        paths lazily in the getters — consistent with Transformer / Gpt2 /
        Bert. (FIX: the original converted in __init__ AND again in the
        getters; convert_to_abspath is only idempotent when ROOT_PATH
        starts with "/", so applying it twice could double-prefix paths.)
    '''
    def __init__(self,
                 in_train="", count_train=50000, label_train="",
                 in_val="", count_val=10000, label_val="",
                 in_test="", count_test=10000, label_test="",
                 batch_size=2, epochs=2, shuffle_buffer_rate=-1):
        #    training set: input path / sample count / label path
        self._in_train = in_train
        self._count_train = count_train
        self._label_train = label_train

        #    validation set
        self._in_val = in_val
        self._count_val = count_val
        self._label_val = label_val

        #    test set
        self._in_test = in_test
        self._count_test = count_test
        self._label_test = label_test

        #    iteration parameters
        self._batch_size = batch_size
        self._epochs = epochs
        self._shuffle_buffer_rate = shuffle_buffer_rate

    def get_in_train(self): return convert_to_abspath(self._in_train)
    def get_count_train(self): return self._count_train
    def get_label_train(self): return convert_to_abspath(self._label_train)

    def get_in_val(self): return convert_to_abspath(self._in_val)
    def get_count_val(self): return self._count_val
    def get_label_val(self): return convert_to_abspath(self._label_val)

    def get_in_test(self): return convert_to_abspath(self._in_test)
    def get_count_test(self): return self._count_test
    def get_label_test(self): return convert_to_abspath(self._label_test)

    def get_batch_size(self): return self._batch_size
    def get_epochs(self): return self._epochs
    def get_shuffle_buffer_rate(self): return self._shuffle_buffer_rate


#    Text-related configuration
class Text:
    '''Text section of conf.yml.'''
    def __init__(self,
                 max_len=20,
                 thesaurus=None,
                 dictionaries_path=''):
        '''
            @param max_len: maximum text length
            @param thesaurus: list of thesaurus file names under
                              resources/thesaurus/ (FIX: default was a
                              mutable [] shared across calls; use None)
            @param dictionaries_path: dictionary file path
        '''
        self._max_len = max_len

        #    convert every thesaurus entry to an absolute system path
        thesaurus = [] if thesaurus is None else thesaurus
        self._thesaurus = [convert_to_abspath('resources/thesaurus/' + t)
                           for t in thesaurus]

        self._dictionaries_path = dictionaries_path

    def get_max_len(self): return self._max_len
    def get_thesaurus(self): return self._thesaurus
    def get_dictionaries_path(self): return convert_to_abspath(self._dictionaries_path)


#    Transformer model configuration
class Transformer:
    '''Transformer section of conf.yml (Java-bean style getters).
        Relative paths are resolved against the project root on read.
    '''
    def __init__(self,
                 num_layers=6,
                 embedding_dims=5,
                 sentence_maxlen=32,
                 self_attention_dims=5,
                 feed_forward_dims=5,
                 mutil_head_attention_groups=8,
                 attention_dropout=0.1,
                 
                 learning_rate=0.001,
                 model_save_weights='temp/models',
                 tensorboard_dir='logs/tensorboard'):
        #    network architecture parameters
        self._num_layers = num_layers
        self._embedding_dims = embedding_dims
        self._sentence_maxlen = sentence_maxlen
        self._self_attention_dims = self_attention_dims
        self._feed_forward_dims = feed_forward_dims
        self._mutil_head_attention_groups = mutil_head_attention_groups
        self._attention_dropout = attention_dropout
        #    training parameters
        self._learning_rate = learning_rate
        self._model_save_weights = model_save_weights
        self._tensorboard_dir = tensorboard_dir

    def get_num_layers(self):
        return self._num_layers
    def get_embedding_dims(self):
        return self._embedding_dims
    def get_sentence_maxlen(self):
        return self._sentence_maxlen
    def get_self_attention_dims(self):
        return self._self_attention_dims
    def get_feed_forward_dims(self):
        return self._feed_forward_dims
    def get_mutil_head_attention_groups(self):
        return self._mutil_head_attention_groups
    def get_attention_dropout(self):
        return self._attention_dropout

    def get_learning_rate(self):
        return self._learning_rate
    def get_model_save_weights(self):
        #    resolved to an absolute path on read
        return convert_to_abspath(self._model_save_weights)
    def get_tensorboard_dir(self):
        return convert_to_abspath(self._tensorboard_dir)


#    GPT-2 related configuration
class Gpt2:
    '''GPT-2 section of conf.yml (Java-bean style getters).
        NOTE(review): the default tfrecord paths say 'cpt2' (not 'gpt2') —
        presumably a typo, but conf.yml overrides them; confirm before
        changing.
    '''
    def __init__(self,
                 pre_training_tfrecord_train = 'temp/cpt2/pre_training/tfrecords/train',
                 pre_training_tfrecord_val = 'temp/cpt2/pre_training/tfrecords/val',
                 pre_training_tfrecord_limit=50000,
                 pre_training_sentence_maxlen=33,
                 
                 batch_size=1,
                 epochs=1,
                 shuffle_buffer_rate=-1,
                 tfrecord_buffer_rate=16,
                 
                 d_model=512,
                 n_head_attention=8,
                 dropout_rate=0.1,
                 f_model=2048,
                 n_block=12,
                 
                 learning_rate=0.001,
                 model_save_weights='temp/models/gpt2',
                 tensorboard_dir='logs/gpt2/tensorboard',
                 embedding_weights_path='resources/embedding/skip_gram_embedding.pkl',
                 embedding_trainable=True):
        #    pre-training data parameters
        self._pre_training_tfrecord_train = pre_training_tfrecord_train
        self._pre_training_tfrecord_val = pre_training_tfrecord_val
        self._pre_training_tfrecord_limit = pre_training_tfrecord_limit
        self._pre_training_sentence_maxlen = pre_training_sentence_maxlen
        #    iteration parameters
        self._batch_size = batch_size
        self._epochs = epochs
        self._shuffle_buffer_rate = shuffle_buffer_rate
        self._tfrecord_buffer_rate = tfrecord_buffer_rate
        #    network architecture parameters
        self._d_model = d_model
        self._n_head_attention = n_head_attention
        self._dropout_rate = dropout_rate
        self._f_model = f_model
        self._n_block = n_block
        #    training / output parameters
        self._learning_rate = learning_rate
        self._model_save_weights = model_save_weights
        self._tensorboard_dir = tensorboard_dir
        self._embedding_weights_path = embedding_weights_path
        self._embedding_trainable = embedding_trainable

    def get_pre_training_tfrecord_train(self):
        return convert_to_abspath(self._pre_training_tfrecord_train)
    def get_pre_training_tfrecord_val(self):
        return convert_to_abspath(self._pre_training_tfrecord_val)
    def get_pre_training_tfrecord_limit(self):
        return self._pre_training_tfrecord_limit
    def get_pre_training_sentence_maxlen(self):
        return self._pre_training_sentence_maxlen

    def get_batch_size(self):
        return self._batch_size
    def get_epochs(self):
        return self._epochs
    def get_shuffle_buffer_rate(self):
        return self._shuffle_buffer_rate
    def get_tfrecord_buffer_rate(self):
        return self._tfrecord_buffer_rate

    def get_d_model(self):
        return self._d_model
    def get_n_head_attention(self):
        return self._n_head_attention
    def get_dropout_rate(self):
        return self._dropout_rate
    def get_f_model(self):
        return self._f_model
    def get_n_block(self):
        return self._n_block

    def get_learning_rate(self):
        return self._learning_rate
    def get_model_save_weights(self):
        return convert_to_abspath(self._model_save_weights)
    def get_tensorboard_dir(self):
        return convert_to_abspath(self._tensorboard_dir)
    def get_embedding_weights_path(self):
        return convert_to_abspath(self._embedding_weights_path)
    def get_embedding_trainable(self):
        return self._embedding_trainable


#    Bert related configuration
class Bert:
    '''Bert section of conf.yml (Java-bean style getters).
        NOTE(review): the default model_save_weights / tensorboard_dir
        values point at gpt2 directories — they look copied from Gpt2, but
        conf.yml overrides them; confirm before changing.
    '''
    #    all constructor parameters are stored on self with a '_' prefix
    _FIELDS = ('neg_prob', 'rewrite_prob', 'rewrite_max', 'rewrite_mask',
               'rewrite_original', 'rewrite_random',
               'pre_training_tfrecord_train', 'pre_training_tfrecord_val',
               'pre_training_tfrecord_limit', 'pre_training_sentence_maxlen',
               'pre_training_max_sentences',
               'd_model', 'n_head_attention', 'dropout_rate', 'f_model',
               'n_block',
               'batch_size', 'epochs', 'shuffle_buffer_rate',
               'tfrecord_buffer_rate',
               'learning_rate', 'model_save_weights', 'tensorboard_dir',
               'lamud_loss_pre_nsp', 'lamud_loss_pre_mlm')

    def __init__(self,
                 neg_prob=0.5,
                 rewrite_prob=0.15,
                 rewrite_max=10,
                 rewrite_mask=0.8,
                 rewrite_original=0.1,
                 rewrite_random=0.1,
                 
                 pre_training_tfrecord_train = 'temp/bert/pre_training/tfrecords/train',
                 pre_training_tfrecord_val = 'temp/bert/pre_training/tfrecords/val',
                 pre_training_tfrecord_limit=50000,
                 pre_training_sentence_maxlen=67,
                 pre_training_max_sentences=2,
                 
                 d_model=512,
                 n_head_attention=8,
                 dropout_rate=0.1,
                 f_model=2048,
                 n_block=12,
                 
                 batch_size=1,
                 epochs=1,
                 shuffle_buffer_rate=-1,
                 tfrecord_buffer_rate=16,
                 
                 learning_rate=0.001,
                 model_save_weights='temp/models/gpt2',
                 tensorboard_dir='logs/gpt2/tensorboard',
                 lamud_loss_pre_nsp=1,
                 lamud_loss_pre_mlm=1,
                 ):
        #    copy every named parameter onto the instance as _<name>
        args = locals()
        for field in self._FIELDS:
            setattr(self, '_' + field, args[field])

    def get_neg_prob(self):
        return self._neg_prob
    def get_rewrite_prob(self):
        return self._rewrite_prob
    def get_rewrite_max(self):
        return self._rewrite_max
    def get_rewrite_mask(self):
        return self._rewrite_mask
    def get_rewrite_original(self):
        return self._rewrite_original
    def get_rewrite_random(self):
        return self._rewrite_random

    def get_pre_training_tfrecord_train(self):
        return convert_to_abspath(self._pre_training_tfrecord_train)
    def get_pre_training_tfrecord_val(self):
        return convert_to_abspath(self._pre_training_tfrecord_val)
    def get_pre_training_tfrecord_limit(self):
        return self._pre_training_tfrecord_limit
    def get_pre_training_sentence_maxlen(self):
        return self._pre_training_sentence_maxlen
    def get_pre_training_max_sentences(self):
        return self._pre_training_max_sentences

    def get_d_model(self):
        return self._d_model
    def get_n_head_attention(self):
        return self._n_head_attention
    def get_dropout_rate(self):
        return self._dropout_rate
    def get_f_model(self):
        return self._f_model
    def get_n_block(self):
        return self._n_block

    def get_batch_size(self):
        return self._batch_size
    def get_epochs(self):
        return self._epochs
    def get_shuffle_buffer_rate(self):
        return self._shuffle_buffer_rate
    def get_tfrecord_buffer_rate(self):
        return self._tfrecord_buffer_rate

    def get_learning_rate(self):
        return self._learning_rate
    def get_model_save_weights(self):
        return convert_to_abspath(self._model_save_weights)
    def get_tensorboard_dir(self):
        return convert_to_abspath(self._tensorboard_dir)
    def get_lamud_loss_pre_nsp(self):
        return self._lamud_loss_pre_nsp
    def get_lamud_loss_pre_mlm(self):
        return self._lamud_loss_pre_mlm


#    Resolve a configured path to an absolute path
def convert_to_abspath(path):
    '''Resolve a configured path into an absolute one.
        A path already starting with "/" is returned unchanged;
        any other path is prefixed with the project root (ROOT_PATH).
    '''
    return path if path.startswith("/") else ROOT_PATH + "/" + path
    
#    Ensure the parent directory of a file exists, creating it if missing
def mkfiledir_ifnot_exises(filepath):
    '''Create the parent directory of filepath if it does not exist.
        @param filepath: path of the file whose parent directory is needed
    '''
    _dir = os.path.dirname(filepath)
    #    FIX: a bare filename yields dirname '' and os.makedirs('') raises;
    #    nothing to create in that case
    if _dir:
        #    exist_ok avoids the check-then-create race of exists()+makedirs()
        os.makedirs(_dir, exist_ok=True)
#    Ensure a directory exists, creating it if missing
def mkdir_ifnot_exises(_dir):
    '''Create the directory _dir if it does not exist.
        @param _dir: directory path
    '''
    #    guard against '' (os.makedirs('') raises); exist_ok removes the
    #    check-then-create race of exists()+makedirs()
    if _dir:
        os.makedirs(_dir, exist_ok=True)


#    Write the configuration file
def write_conf(_dict, file_path):
    '''Serialize the given configuration dict to a YAML file.
        @param _dict: configuration dict to write
        @param file_path: target file path (relative paths are resolved
                          against the project root)
    '''
    file_path = convert_to_abspath(file_path)
    mkfiledir_ifnot_exises(file_path)

    #    remove a pre-existing file of the same name first
    if os.path.exists(file_path):
        os.remove(file_path)

    #    FIX: `with` guarantees the handle is closed even if dumping raises
    #    (the original left the file open on a safe_dump exception)
    with open(file_path, mode='w', encoding='utf-8') as fw:
        yaml.safe_dump(_dict, fw)


#    Append an entry to sys.path
def append_sys_path(path):
    '''Resolve the given path against the project root and append it to
        sys.path, then echo the resulting search path.
        @param path: path to append (relative paths are resolved)
    '''
    abs_path = convert_to_abspath(path)
    sys.path.append(abs_path)
    print(sys.path)



#    Eagerly load conf.yml at import time so consumers can simply do
#    `from <this module> import DATASET, BERT, ...`.
#    NOTE(review): this performs file I/O on import — importing the module
#    fails if resources/conf.yml is missing.
ALL_DICT, DATASET, TEXT, TRANSFORMER, GPT2, BERT = load_conf_yaml()


