# 定义实体标注的一些映射
import json
import os.path
from itertools import chain
from pathlib import Path

from transformers import BertConfig

# Mapping from Chinese entity-category names to the tag abbreviations used
# in the label scheme.
# NOTE(review): '影像检查' and '实验室检验' map to themselves (no English
# abbreviation), and '解剖部位' maps to 'OPS' — confirm these are intentional,
# since they feed directly into the tag vocabulary below.
EN_DICT = {
    '疾病和诊断':'DIS',
    '手术': 'OPE',
    '药物': 'MED',
    '解剖部位': 'OPS',
    '影像检查': '影像检查',
    '实验室检验': '实验室检验'
}

# BMES-style tag set: one B-/M-/E-/S- tag per entity type, in dict order.
TAGS = [f"{prefix}-{tag}" for tag in EN_DICT.values() for prefix in ("B", "M", "E", "S")]
# NOTE(review): '0' is the digit zero; BMES schemes usually use the letter
# 'O' for the outside tag — verify this matches the dataset's labels.
TAGS.append('0')
class Params(object):
    """
    Container for all experiment parameters: paths, label mapping, data
    loading and model-architecture settings.

    Any value can be overridden externally via the ``params`` dict; keys that
    are absent fall back to the defaults initialised here.
    """
    def __init__(self, config: BertConfig = None, pre_model_type="BERT", ex_index=1, params=None):
        """
        :param config: BertConfig of the pretrained model. Defaults to None so
            that :meth:`load` and quick scripts can construct Params without a
            config object (previously this was required and ``load`` crashed
            with a TypeError).
        :param pre_model_type: pretrained model family name, e.g. "BERT".
        :param ex_index: experiment index, used to build output directories.
        :param params: optional dict overriding any of the defaults below.
        """
        super(Params, self).__init__()
        if params is None:
            params = {}

        # Pretrained-model transfer parameters
        self.pre_model_type = pre_model_type
        self.ex_index = ex_index
        # Root directory: the folder containing this file, unless 'root_path'
        # is supplied in params.
        self.root_path = Path(params.get('root_path', os.path.abspath(os.path.dirname(__file__))))
        # Dataset directory
        self.data_dir = Path(params.get('data_dir', self.root_path / 'data'))
        # BERT-related paths
        self.bert_root_dir = Path(params.get('bert_root_dir', self.root_path / 'bert'))
        self.bert_vocab_path = self.bert_root_dir / 'vocab.txt'
        # Directory where params.json is persisted (created eagerly)
        self.params_path = Path(params.get('params_path', self.root_path / f'experiments/ex_{ex_index}'))
        self.params_path.mkdir(parents=True, exist_ok=True)
        # Directory where model checkpoints are saved (created eagerly)
        self.model_dir = Path(params.get('model_dir', self.root_path / f'model/ex_{ex_index}'))
        self.model_dir.mkdir(parents=True, exist_ok=True)

        # Label-mapping parameters
        self.tag2idx = {tag: idx for idx, tag in enumerate(TAGS)}
        self.num_labels = len(self.tag2idx)

        # Data-loading parameters
        self.train_batch_size = params.get("train_batch_size", 32)
        self.val_batch_size = params.get("val_batch_size", 32)
        self.test_batch_size = params.get("test_batch_size", 128)
        # Whether to cache preprocessed data.
        # NOTE(review): "data_cathe" looks like a typo of "data_cache", but
        # renaming it would break existing params.json files and callers.
        self.data_cathe = params.get("data_cathe", True)

        # Maximum sequence length; longer sequences are split.
        self.max_seq_length = params.get("max_seq_length", 512)

        # Model-architecture parameters
        self.lm_layer_name = "Word2VecLMModule"
        self.config: BertConfig = config
        self.encoder_layer_name = "BiLSTMEncoderModule"
        self.encoder_output_size = params.get("encoder_output_size")  # final encoder feature size
        self.encoder_lstm_layers = 1  # number of BiLSTM layers in the encoder
        self.encoder_lstm_dropout = 0.0  # dropout inside the encoder BiLSTM
        self.encoder_lstm_hidden_size = 128
        self.encoder_lstm_with_ln = True  # whether to apply LayerNorm
        self.classify_layer_name = "SoftmaxSeqClassifyModule"
        self.classify_fc_layers = None  # number of fully-connected layers (derived below)
        self.classify_fc_hidden_size = params.get("classify_fc_hidden_size")  # None, int or list[int]
        self.classify_fc_dropout = 0.0

        # Parameter check & reset: normalise classify_fc_hidden_size to a list
        # whose last entry (if any) equals num_labels.
        if self.classify_fc_hidden_size is None:
            self.classify_fc_hidden_size = []
        if isinstance(self.classify_fc_hidden_size, int):
            self.classify_fc_hidden_size = [self.classify_fc_hidden_size]
        if len(self.classify_fc_hidden_size) > 0:
            if self.classify_fc_hidden_size[-1] != self.num_labels:
                # Ensure the last FC layer projects onto the label space.
                self.classify_fc_hidden_size.append(self.num_labels)
        self.classify_fc_layers = len(self.classify_fc_hidden_size)
        if self.classify_fc_layers == 0:
            # With no FC layers in the classifier, the encoder output itself
            # must be the per-token class logits.
            self.encoder_output_size = self.num_labels

    @staticmethod
    def load(json_path):
        """
        Restore a Params object from a params.json written by :meth:`save`.

        Note: the pretrained ``config`` is stored as a plain dict in the JSON
        and is NOT reconstructed here — ``self.config`` stays None after
        loading.
        """
        with open(json_path, 'r', encoding='utf-8') as reader:
            params = json.load(reader)
        # The whole dict is passed back in as overrides.
        return Params(pre_model_type=params['pre_model_type'],
                      ex_index=params['ex_index'],
                      params=params)

    def save(self, json_path=None):
        """
        Serialise all attributes to JSON.

        :param json_path: target file; defaults to params_path/params.json.
        """
        if json_path is None:
            json_path = self.params_path / "params.json"
        params = {}
        for k, v in self.__dict__.items():
            if isinstance(v, Path):
                v = str(v.absolute())
            elif hasattr(v, 'to_dict'):
                # BertConfig (and similar objects) are not JSON-serialisable;
                # store their dict representation instead. Previously this
                # raised TypeError whenever a real config was set.
                v = v.to_dict()
            params[k] = v
        with open(json_path, 'w', encoding='utf-8') as writer:
            json.dump(params, writer, ensure_ascii=False)


if __name__ == '__main__':
    # Smoke test: build params rooted at .\runs, persist them, reload the
    # JSON, and spot-check one value round-tripped correctly.
    p = Params(params={'root_path':r'.\runs'})
    p.save()
    p = Params.load(r'.\runs\experiments\ex_1\params.json')
    print(p.max_seq_length)






