# -*- coding: utf-8 -*-  
'''
解析: resources/conf.yml

@author: luoyi
Created on 2021-09-13
'''
import yaml
import os
import sys


#    Project root directory (every other relative path is joined onto it).
#    Derived by truncating this file's absolute path at the first 'tbert' segment,
#    then re-appending 'tbert' — assumes the project folder is named 'tbert'.
ROOT_PATH = os.path.abspath(os.path.dirname(__file__)).split('tbert')[0]
ROOT_PATH = ROOT_PATH + "tbert"


#    Absolute path of the yaml configuration file
CONF_PATH = ROOT_PATH + "/resources/conf.yml"
#    Load the conf.yml configuration file
def load_conf_yaml(yaml_path=CONF_PATH):
    '''Parse the yaml configuration file and build one parser object per section.
        @param yaml_path: path of the yaml file (defaults to resources/conf.yml)
        @return: (raw_config_dict, stop_words, dataset_sohu_news,
                  dataset_sohu_THUCNews, lda, gsdmm, bert, tbert)
    '''
    print('加载配置文件:' + yaml_path)
    #    the with-statement guarantees the handle is closed (previously it leaked)
    with open(yaml_path, 'r', encoding='utf-8') as f:
        c = yaml.safe_load(f)

    stop_words = Stop_wordsConfParser(
                file_path=c['stop_words']['file_path'],
                )

    #    bind each section dict once instead of re-indexing c for every key
    sec = c['dataset_sohu_news']
    dataset_sohu_news = Dataset_sohu_newsConfParser(
                reduced_dir_path=sec['reduced_dir_path'],
                titles_path=sec['titles_path'],
                contents_path=sec['contents_path'],
                words_path=sec['words_path'],
                words_count=sec['words_count'],
                train_words_path=sec['train_words_path'],
                val_words_path=sec['val_words_path'],
                word_id_path=sec['word_id_path'],
                word_frequency_path=sec['word_frequency_path'],
                batch_size=sec['batch_size'],
                epochs=sec['epochs'],
                shuffle_buffer_rate=sec['shuffle_buffer_rate'],
                tfrecord_buffer_rate=sec['tfrecord_buffer_rate'],
                )

    sec = c['dataset_sohu_THUCNews']
    dataset_sohu_THUCNews = Dataset_sohu_thucnewsConfParser(
                original_dir_path=sec['original_dir_path'],
                word_frequency_path=sec['word_frequency_path'],
                word_id_path=sec['word_id_path'],
                words_path=sec['words_path'],
                words_count=sec['words_count'],
                pre_training_train_path=sec['pre_training_train_path'],
                pre_training_train_count=sec['pre_training_train_count'],
                pre_training_val_path=sec['pre_training_val_path'],
                pre_training_val_count=sec['pre_training_val_count'],
                pre_tfrecord_limit=sec['pre_tfrecord_limit'],
                tbert_training_train_path=sec['tbert_training_train_path'],
                tbert_training_train_count=sec['tbert_training_train_count'],
                tbert_training_val_path=sec['tbert_training_val_path'],
                tbert_training_val_count=sec['tbert_training_val_count'],
                tbert_tfrecord_limit=sec['tbert_tfrecord_limit'],
                batch_size=sec['batch_size'],
                epochs=sec['epochs'],
                shuffle_buffer_rate=sec['shuffle_buffer_rate'],
                tfrecord_buffer_rate=sec['tfrecord_buffer_rate'],
                )

    sec = c['lda']
    lda = LdaConfParser(
                k=sec['k'],
                max_doc_words=sec['max_doc_words'],
                wn_topic_word_path=sec['wn_topic_word_path'],
                wn_topic_path=sec['wn_topic_path'],
                dn_topic_path=sec['dn_topic_path'],
                doc_word_topic_path=sec['doc_word_topic_path'],
                auto_save_batch=sec['auto_save_batch'],
                auto_save_epoch=sec['auto_save_epoch'],
                )

    sec = c['gsdmm']
    gsdmm = GsdmmConfParser(
                k=sec['k'],
                max_doc_words=sec['max_doc_words'],
                wn_topic_word_path=sec['wn_topic_word_path'],
                wn_topic_path=sec['wn_topic_path'],
                dn_topic_path=sec['dn_topic_path'],
                auto_save_batch=sec['auto_save_batch'],
                auto_save_epoch=sec['auto_save_epoch'],
                )

    sec = c['bert']
    bert = BertConfParser(
                neg_prob=sec['neg_prob'],
                rewrite_prob=sec['rewrite_prob'],
                rewrite_max=sec['rewrite_max'],
                rewrite_mask=sec['rewrite_mask'],
                rewrite_original=sec['rewrite_original'],
                rewrite_random=sec['rewrite_random'],
                learning_rate=sec['learning_rate'],
                model_save_weights_path=sec['model_save_weights_path'],
                tensorboard_dir_path=sec['tensorboard_dir_path'],
                max_sen_len=sec['max_sen_len'],
                d_model=sec['d_model'],
                n_head_attention=sec['n_head_attention'],
                dropout_rate=sec['dropout_rate'],
                f_model=sec['f_model'],
                n_block=sec['n_block'],
                lamud_loss_pre_nsp=sec['lamud_loss_pre_nsp'],
                lamud_loss_pre_mlm=sec['lamud_loss_pre_mlm'],
                )

    sec = c['tbert']
    tbert = TbertConfParser(
                k=sec['k'],
                classify_fc1_hidden=sec['classify_fc1_hidden'],
                classify_fc1_dropout=sec['classify_fc1_dropout'],
                classify_fc2_hidden=sec['classify_fc2_hidden'],
                classify_fc2_dropout=sec['classify_fc2_dropout'],
                max_sen=sec['max_sen'],
                max_sen_len=sec['max_sen_len'],
                learning_rate=sec['learning_rate'],
                model_save_weights_path=sec['model_save_weights_path'],
                tensorboard_dir_path=sec['tensorboard_dir_path'],
                )

    return c, stop_words, dataset_sohu_news, dataset_sohu_THUCNews, lda, gsdmm, bert, tbert



class Stop_wordsConfParser:
    """Accessor for the `stop_words` section of conf.yml."""

    def __init__(self,
                file_path = 'resources/stop_words.txt',
                ):
        #    keep the raw (possibly project-relative) path; resolved on access
        self._file_path = file_path

    def get_file_path(self):
        #    resolve against the project root on every access
        return convert_to_abspath(self._file_path)

class Dataset_sohu_newsConfParser:
    """Accessor for the `dataset_sohu_news` section of conf.yml."""

    def __init__(self,
                reduced_dir_path = 'temp/sohu_news/SogouCS.reduced',
                titles_path = 'temp/sohu_news/docs/titles/titles_{}.txt',
                contents_path = 'temp/sohu_news/docs/contents/contents_{}.txt',
                words_path = 'temp/sohu_news/docs/words/words_{}.txt',
                words_count = 65536,
                train_words_path = 'temp/sohu_news/words/train',
                val_words_path = 'temp/sohu_news/words/val',
                word_id_path = 'temp/sohu_news/words',
                word_frequency_path = 'temp/sohu_news/words',
                batch_size = 32,
                epochs = 1,
                shuffle_buffer_rate = -1,
                tfrecord_buffer_rate = 8,
                ):
        #    stash every constructor argument as an underscore-prefixed attribute
        for name, value in locals().items():
            if name != 'self':
                setattr(self, '_' + name, value)

    #    path accessors resolve against the project root on every call
    def get_reduced_dir_path(self):
        return convert_to_abspath(self._reduced_dir_path)

    def get_titles_path(self):
        return convert_to_abspath(self._titles_path)

    def get_contents_path(self):
        return convert_to_abspath(self._contents_path)

    def get_words_path(self):
        return convert_to_abspath(self._words_path)

    def get_words_count(self):
        return self._words_count

    def get_train_words_path(self):
        return convert_to_abspath(self._train_words_path)

    def get_val_words_path(self):
        return convert_to_abspath(self._val_words_path)

    def get_word_id_path(self):
        return convert_to_abspath(self._word_id_path)

    def get_word_frequency_path(self):
        return convert_to_abspath(self._word_frequency_path)

    def get_batch_size(self):
        return self._batch_size

    def get_epochs(self):
        return self._epochs

    def get_shuffle_buffer_rate(self):
        return self._shuffle_buffer_rate

    def get_tfrecord_buffer_rate(self):
        return self._tfrecord_buffer_rate

class Dataset_sohu_thucnewsConfParser:
    """Accessor for the `dataset_sohu_THUCNews` section of conf.yml."""

    def __init__(self,
                original_dir_path = '/Users/irenebritney/Desktop/NLP/语料库/THUCNews/data/THUCNews',
                word_frequency_path = 'temp/sohu_thuc_news/words',
                word_id_path = 'temp/sohu_thuc_news/words',
                words_path = 'temp/sohu_thuc_news/pre_training/words/words_{}.txt',
                words_count = 16384,
                pre_training_train_path = 'temp/sohu_thuc_news/pre_training/bert/tfrecord/train',
                pre_training_train_count = 262144,
                pre_training_val_path = 'temp/sohu_thuc_news/pre_training/bert/tfrecord/val',
                pre_training_val_count = 256,
                pre_tfrecord_limit = 32768,
                tbert_training_train_path = 'temp/sohu_thuc_news/training/tbert/tfrecord/train',
                tbert_training_train_count = 262144,
                tbert_training_val_path = 'temp/sohu_thuc_news/training/tbert/tfrecord/val',
                tbert_training_val_count = 256,
                tbert_tfrecord_limit = 32768,
                batch_size = 32,
                epochs = 20,
                shuffle_buffer_rate = -1,
                tfrecord_buffer_rate = 8,
                ):
        #    stash every constructor argument as an underscore-prefixed attribute
        for name, value in locals().items():
            if name != 'self':
                setattr(self, '_' + name, value)

    #    path accessors resolve against the project root on every call
    def get_original_dir_path(self):
        return convert_to_abspath(self._original_dir_path)

    def get_word_frequency_path(self):
        return convert_to_abspath(self._word_frequency_path)

    def get_word_id_path(self):
        return convert_to_abspath(self._word_id_path)

    def get_words_path(self):
        return convert_to_abspath(self._words_path)

    def get_words_count(self):
        return self._words_count

    def get_pre_training_train_path(self):
        return convert_to_abspath(self._pre_training_train_path)

    def get_pre_training_train_count(self):
        return self._pre_training_train_count

    def get_pre_training_val_path(self):
        return convert_to_abspath(self._pre_training_val_path)

    def get_pre_training_val_count(self):
        return self._pre_training_val_count

    def get_pre_tfrecord_limit(self):
        return self._pre_tfrecord_limit

    def get_tbert_training_train_path(self):
        return convert_to_abspath(self._tbert_training_train_path)

    def get_tbert_training_train_count(self):
        return self._tbert_training_train_count

    def get_tbert_training_val_path(self):
        return convert_to_abspath(self._tbert_training_val_path)

    def get_tbert_training_val_count(self):
        return self._tbert_training_val_count

    def get_tbert_tfrecord_limit(self):
        return self._tbert_tfrecord_limit

    def get_batch_size(self):
        return self._batch_size

    def get_epochs(self):
        return self._epochs

    def get_shuffle_buffer_rate(self):
        return self._shuffle_buffer_rate

    def get_tfrecord_buffer_rate(self):
        return self._tfrecord_buffer_rate

class LdaConfParser:
    """Accessor for the `lda` section of conf.yml."""

    def __init__(self,
                k = 64,
                max_doc_words = 8192,
                wn_topic_word_path = 'temp/models/lda/wn_topic_word.pkl',
                wn_topic_path = 'temp/models/lda/wn_topic.pkl',
                dn_topic_path = 'temp/models/lda/dn_topic.pkl',
                doc_word_topic_path = 'temp/models/lda/doc_word_topic.pkl',
                auto_save_batch = 200,
                auto_save_epoch = 1,
                ):
        #    stash every constructor argument as an underscore-prefixed attribute
        for name, value in locals().items():
            if name != 'self':
                setattr(self, '_' + name, value)

    def get_k(self):
        return self._k

    def get_max_doc_words(self):
        return self._max_doc_words

    #    path accessors resolve against the project root on every call
    def get_wn_topic_word_path(self):
        return convert_to_abspath(self._wn_topic_word_path)

    def get_wn_topic_path(self):
        return convert_to_abspath(self._wn_topic_path)

    def get_dn_topic_path(self):
        return convert_to_abspath(self._dn_topic_path)

    def get_doc_word_topic_path(self):
        return convert_to_abspath(self._doc_word_topic_path)

    def get_auto_save_batch(self):
        return self._auto_save_batch

    def get_auto_save_epoch(self):
        return self._auto_save_epoch

class GsdmmConfParser:
    """Accessor for the `gsdmm` section of conf.yml."""

    def __init__(self,
                k = 64,
                max_doc_words = 8192,
                wn_topic_word_path = 'temp/models/gsdmm/wn_topic_word.pkl',
                wn_topic_path = 'temp/models/gsdmm/wn_topic.pkl',
                dn_topic_path = 'temp/models/gsdmm/dn_topic.pkl',
                auto_save_batch = 200,
                auto_save_epoch = 1,
                ):
        #    stash every constructor argument as an underscore-prefixed attribute
        for name, value in locals().items():
            if name != 'self':
                setattr(self, '_' + name, value)

    def get_k(self):
        return self._k

    def get_max_doc_words(self):
        return self._max_doc_words

    #    path accessors resolve against the project root on every call
    def get_wn_topic_word_path(self):
        return convert_to_abspath(self._wn_topic_word_path)

    def get_wn_topic_path(self):
        return convert_to_abspath(self._wn_topic_path)

    def get_dn_topic_path(self):
        return convert_to_abspath(self._dn_topic_path)

    def get_auto_save_batch(self):
        return self._auto_save_batch

    def get_auto_save_epoch(self):
        return self._auto_save_epoch

class BertConfParser:
    """Accessor for the `bert` (pre-training) section of conf.yml."""

    def __init__(self,
                neg_prob = 0.5,
                rewrite_prob = 0.15,
                rewrite_max = 16,
                rewrite_mask = 0.8,
                rewrite_original = 0.1,
                rewrite_random = 0.1,
                learning_rate = 0.0001,
                model_save_weights_path = 'temp/models/bert',
                tensorboard_dir_path = 'logs/bert/tensorboard',
                max_sen_len = 64,
                d_model = 768,
                n_head_attention = 12,
                dropout_rate = 0.1,
                f_model = 3072,
                n_block = 12,
                lamud_loss_pre_nsp = 1,
                lamud_loss_pre_mlm = 1,
                ):
        #    stash every constructor argument as an underscore-prefixed attribute
        for name, value in locals().items():
            if name != 'self':
                setattr(self, '_' + name, value)

    def get_neg_prob(self):
        return self._neg_prob

    def get_rewrite_prob(self):
        return self._rewrite_prob

    def get_rewrite_max(self):
        return self._rewrite_max

    def get_rewrite_mask(self):
        return self._rewrite_mask

    def get_rewrite_original(self):
        return self._rewrite_original

    def get_rewrite_random(self):
        return self._rewrite_random

    def get_learning_rate(self):
        return self._learning_rate

    #    path accessors resolve against the project root on every call
    def get_model_save_weights_path(self):
        return convert_to_abspath(self._model_save_weights_path)

    def get_tensorboard_dir_path(self):
        return convert_to_abspath(self._tensorboard_dir_path)

    def get_max_sen_len(self):
        return self._max_sen_len

    def get_d_model(self):
        return self._d_model

    def get_n_head_attention(self):
        return self._n_head_attention

    def get_dropout_rate(self):
        return self._dropout_rate

    def get_f_model(self):
        return self._f_model

    def get_n_block(self):
        return self._n_block

    def get_lamud_loss_pre_nsp(self):
        return self._lamud_loss_pre_nsp

    def get_lamud_loss_pre_mlm(self):
        return self._lamud_loss_pre_mlm

class TbertConfParser:
    """Accessor for the `tbert` (classification) section of conf.yml."""

    def __init__(self,
                k = 14,
                classify_fc1_hidden = 1024,
                classify_fc1_dropout = 0.1,
                classify_fc2_hidden = 256,
                classify_fc2_dropout = 0.1,
                max_sen = 64,
                max_sen_len = 256,
                learning_rate = 0.0001,
                model_save_weights_path = 'temp/models/tberts',
                tensorboard_dir_path = 'logs/tbert/tensorboard',
                ):
        #    stash every constructor argument as an underscore-prefixed attribute
        for name, value in locals().items():
            if name != 'self':
                setattr(self, '_' + name, value)

    def get_k(self):
        return self._k

    def get_classify_fc1_hidden(self):
        return self._classify_fc1_hidden

    def get_classify_fc1_dropout(self):
        return self._classify_fc1_dropout

    def get_classify_fc2_hidden(self):
        return self._classify_fc2_hidden

    def get_classify_fc2_dropout(self):
        return self._classify_fc2_dropout

    def get_max_sen(self):
        return self._max_sen

    def get_max_sen_len(self):
        return self._max_sen_len

    def get_learning_rate(self):
        return self._learning_rate

    #    path accessors resolve against the project root on every call
    def get_model_save_weights_path(self):
        return convert_to_abspath(self._model_save_weights_path)

    def get_tensorboard_dir_path(self):
        return convert_to_abspath(self._tensorboard_dir_path)



#    Resolve a configured path to an absolute one
def convert_to_abspath(path):
    '''Resolve a configured path to an absolute path.
        Paths starting with "/" are returned unchanged;
        any other path gets the project root directory prepended.
    '''
    if not path.startswith("/"):
        path = ROOT_PATH + "/" + path
    return path
    
#    Create the parent directory of a file if it does not exist
def mkfiledir_ifnot_exises(filepath):
    '''Create the parent directory of the given file if it does not exist.
        @param filepath: path of the file whose parent directory is needed
    '''
    _dir = os.path.dirname(filepath)
    #    a bare filename has dirname '' — os.makedirs('') would raise, so guard;
    #    exist_ok avoids the check-then-create race of the exists() pattern
    if _dir:
        os.makedirs(_dir, exist_ok=True)
#    Create a directory if it does not exist
def mkdir_ifnot_exises(_dir):
    '''Create the given directory (and any missing parents) if it does not exist.
        @param _dir: directory path
    '''
    #    exist_ok avoids the check-then-create race of the exists() pattern
    os.makedirs(_dir, exist_ok=True)


#    Write a configuration file
def write_conf(_dict, file_path):
    '''Write the given configuration entries out as a yaml file.
        @param _dict: dict of configuration entries to dump
        @param file_path: target file path (project-relative or absolute)
    '''
    file_path = convert_to_abspath(file_path)
    mkfiledir_ifnot_exises(file_path)

    #    remove any identically named file first
    if os.path.exists(file_path):
        os.remove(file_path)

    #    the with-statement closes the handle even if safe_dump raises
    #    (previously the handle leaked on error)
    with open(file_path, mode='w', encoding='utf-8') as fw:
        yaml.safe_dump(_dict, fw)


#    Append a path to sys.path
def append_sys_path(path):
    '''Resolve the given path against the project root and append it to sys.path.
        @param path: path to append (project-relative or absolute)
    '''
    resolved = convert_to_abspath(path)
    sys.path.append(resolved)
    print(sys.path)


#	Module-level singletons exposed to the rest of the project,
#	parsed once at import time from resources/conf.yml.
ALL_DICT, STOP_WORDS, DATASET_SOHU_NEWS, DATASET_SOHU_THUCNEWS, LDA, GSDMM, BERT, TBERT = load_conf_yaml()


