import os

from mindify.base.dict_object import DictObject
from transformers import BertTokenizer, AutoConfig


class Config:
    """Training/runtime configuration for the dialogue-classification model.

    Attributes are split into two groups: near-constant parameters and
    tunable hyper-parameters.  ``AutoConfig`` is queried once at
    construction time to derive the embedding dimension from the chosen
    pretrained backbone, so building a ``Config`` requires the HF model
    config to be downloadable/cached.
    """

    def __init__(self):
        """
        Part 1: constant parameters
        """
        # https://huggingface.co/uer/chinese_roberta_L-12_H-768
        # RoBERTa-base performed best of that family and is the baseline
        # model, though it trains somewhat slowly.
        # self.pretrained_model = "uer/chinese_roberta_L-12_H-768"
        self.pretrained_model = "IDEA-CCNL/Erlangshen-SimCSE-110M-Chinese"
        # self.pretrained_model = "uer/sbert-base-chinese-nli"
        # BERT layers left trainable.  layer.8-10 were all tried; this set
        # gave the best results.
        self.bert_unfreeze_layers = ['layer.10.', 'layer.11.', 'bert.pooler', 'pooler.dense', 'out.']
        # Global dropout rate (0.2 beat 0.3 in experiments).
        self.dropout = 0.1
        # Force CUDA.
        self.device = 'cuda'
        # Embedding dimension, initialised from the backbone's config.
        # NOTE: this loads (and may download) the HF model config.
        bert_config = AutoConfig.from_pretrained(self.pretrained_model)
        self.embed_dim = bert_config.hidden_size
        # Classes are imbalanced, so class weights may be needed in the
        # loss; some claim it is unnecessary — worth verifying.
        # https://www.zhihu.com/question/400443029
        self.num_labels = 16
        # Number of speaker roles; 2 in this project: doctor and patient.
        self.num_speakers = 2
        # 95% of sentences are <= 39 chars; with the "doctor:"/"patient:"
        # prefix prepended to each sentence, 43 characters suffice.
        self.max_seq_length = 43
        # dataloader num_workers
        self.num_workers = 0
        # lightning dataloader
        self.dataloader_num_workers = 0
        # Number of epochs; early stopping may cut training short.
        self.num_epochs = 300
        # Where the model checkpoints are saved (set by update_save_path).
        self.save_path = None
        # "checkpoint": load optimizer state as well; "model": load model
        # weights only; "none": start from scratch.
        self.train_resume_mode = "none"

        self.optimizer = "adam"
        self.label_weights = None
        self.label_smoothing = 0.1
        # Initialised after the datamodule has loaded.
        self.num_train_samples = None
        # num_train_samples / gradient_accumulation_steps
        self.steps_per_epoch = None

        """
        Part 2: tunable parameters
        """

        # https://zhuanlan.zhihu.com/p/386603816
        # Per-layer learning rates did not converge here; reason unknown.
        self.learning_rate = 5e-5
        # https://zhuanlan.zhihu.com/p/386603816
        # Community experience suggests 1e-4 as a good starting value.
        self.weight_decay = 1e-5

        # Scheduler parameters.
        self.scheduler_min_lr = 1e-8
        self.scheduler_patience = 3
        self.scheduler_factor = 0.5
        self.scheduler_threshold = 0.005
        self.scheduler_threshold_mode = 'rel'

        # Gradient-accumulation steps.  Raising this N-fold suggests
        # raising the learning rate by sqrt(N); large values converge
        # slowly.  Typical choices: [4, 8, 16, 32, 64, 128].
        self.gradient_accumulation_steps = 8

        # Multi-head attention heads/layers; more heads extract richer
        # features.
        self.num_attention_heads = 2
        self.num_attention_layers = 2
        # Hidden size of the attention block's feed-forward network.
        self.feed_forward_hidden_size = 1024
        self.num_feed_forward_layers = 6

        # Positional-encoding type: "normal" or "complex-order".
        self.position_embedding_type = "complex-order"

    def update_save_path(self, module):
        """Derive and create the checkpoint directory for *module*.

        The path encodes the pretrained-model name (sanitised for the file
        system) plus ``module.hash()`` so different configurations do not
        overwrite each other's checkpoints.
        """
        model_save_path = self.pretrained_model
        model_save_path = model_save_path.replace("\\", "/").replace(":", "").replace("/", "_")

        # Renamed from ``hash`` to avoid shadowing the builtin.
        model_hash = module.hash()
        self.save_path = f'/tmp/{model_save_path}/{model_hash}/checkpoints'

        # Keyword arguments make the intent explicit (was positional).
        os.makedirs(self.save_path, mode=0o755, exist_ok=True)

    def find_last_ckpt_path(self):
        """Return the most recently modified ``*.ckpt`` file in ``save_path``.

        Returns ``None`` when no checkpoint exists — including when
        ``save_path`` is unset or the directory is missing (previously
        those cases raised instead of returning ``None``).
        """
        last_ckpt_path = None

        # Robustness: an unset or missing directory means "no checkpoint".
        if not self.save_path or not os.path.isdir(self.save_path):
            print("last_ckpt_path", last_ckpt_path)
            return last_ckpt_path

        last_mtime = 0
        for file in os.listdir(self.save_path):
            file = os.path.join(self.save_path, file)
            if not file.endswith(".ckpt"):
                continue

            mtime = os.path.getmtime(file)
            if mtime > last_mtime:
                last_mtime = mtime
                last_ckpt_path = file

        print("last_ckpt_path", last_ckpt_path)
        return last_ckpt_path

    @property
    def tokenizer(self):
        """Lazily build and cache the BERT tokenizer for the backbone.

        The (slow) ``from_pretrained`` call happens at most once per
        instance; the result is stashed in ``self.__dict__``.  The
        property is a data descriptor, so reads always come through this
        method, which then serves the cached entry.
        """
        tokenizer_key = 'tokenizer'
        if tokenizer_key not in self.__dict__:
            self.__dict__[tokenizer_key] = BertTokenizer.from_pretrained(self.pretrained_model)

        return self.__dict__[tokenizer_key]


if __name__ == '__main__':
    # Smoke test: build the default configuration and dump its attributes.
    cfg = Config()
    print(vars(cfg))