import torch.cuda
from transformers import  BertTokenizer, BertConfig
import os

def path_exist_makedirs(path):
    """Create directory *path* (and any missing parents) if it does not exist.

    Uses ``exist_ok=True`` instead of an ``exists()`` pre-check, which avoids
    the TOCTOU race where the directory is created by another process between
    the check and the ``makedirs`` call.
    """
    os.makedirs(path, exist_ok=True)

class Bert_Config_ZC(object):
    """Configuration holder for the pediatric BERT text classifier.

    On construction this resolves data/model paths, loads the label list from
    ``class.txt``, instantiates the BERT tokenizer/config from the local
    pretrained directory, and fixes the training hyperparameters.
    """

    def __init__(self):
        super().__init__()
        # Prefer GPU when available.
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'

        # Dataset locations.
        data_dir = '../../../data'
        self.train_data_path = f'{data_dir}/pediatric_train.csv'
        self.dev_data_path = f'{data_dir}/pediatric_dev.csv'
        self.test_data_path = f'{data_dir}/pediatric_test.csv'
        labels_file = f'{data_dir}/class.txt'

        # Where trained model weights and their evaluation score are written.
        save_dir = '../save_models'
        path_exist_makedirs(save_dir)
        self.pediatric_model_save_path = f'{save_dir}/pediatric_bert_classifer_model.pt'
        self.model_save_score_path = f'{save_dir}/pediatric_bert_classifer_model_score.txt'

        # One class label per line in the labels file.
        with open(labels_file, encoding='utf-8') as fh:
            self.class_list = [line.strip() for line in fh]

        # Root directory holding the locally-downloaded pretrained models.
        pretrain_dir = '../pre_train_models'

        # BERT tokenizer/config loaded from the local bert-base-chinese copy.
        self.bert_pretrain_path = f'{pretrain_dir}/bert-base-chinese'
        self.bert_conf = BertConfig.from_pretrained(self.bert_pretrain_path)
        self.bert_tokenizer = BertTokenizer.from_pretrained(self.bert_pretrain_path)

        # Hyperparameters.
        self.num_classes = len(self.class_list)   # derived from class.txt
        self.seq_max_len = 80                     # max token length per sentence
        self.batch_size = 128                     # samples per batch
        self.epochs = 3                           # training epochs
        self.lr = 5e-5                            # learning rate
        self.average = 'weighted'                 # averaging mode for metrics


if __name__ == '__main__':
    # Quick smoke check: build the config and preview the dev split.
    import pandas as pd

    config = Bert_Config_ZC()
    dev_frame = pd.read_csv(config.dev_data_path, header=None, names=['label', 'text'])
    print(dev_frame.head(10))














