import os
import torch
from transformers import BertTokenizer


class Config:
    """Hyper-parameter and path configuration for a BERT-CRF NER model.

    All settings are class attributes shared by default; any of them can be
    overridden per instance by passing keyword arguments to the constructor,
    e.g. ``Config(batch_size=8, num_epochs=10)``.
    """

    # Data and output locations (relative to the current working directory).
    train_data_path = './data/train_data.txt'
    valid_data_path = './data/validate_data.txt'
    log_dir = os.getcwd() + '/Logs/Bert_CRF/'
    checkpoint_dir = os.getcwd() + '/CheckPoint'
    # NOTE(review): this runs at class-definition (module import) time and may
    # download / read the tokenizer from the HuggingFace cache as a side
    # effect of importing this module — consider lazy loading.
    tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
    # Maximum token sequence length fed to the model.
    maxlength = 400
    # BIO tag set: diseases (dis) and symptoms (sym), plus CRF start/stop tags.
    label_to_ids = {"O": 0, "B-dis": 1, "I-dis": 2, "B-sym": 3, "I-sym": 4, '[START]': 5, '[STOP]': 6}
    # Derived from label_to_ids so the two mappings can never drift apart.
    ids_to_label = {v: k for k, v in label_to_ids.items()}
    num_labels = len(label_to_ids)
    # BERT-base hidden dimension.
    hidden_size = 768
    # NOTE(review): 0.8 is unusually high for a drop probability — confirm
    # whether the model interprets this as p(drop) or p(keep).
    dropout = 0.8
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Training parameters
    full_finetuning = True
    learning_rate = 3e-5
    weight_decay = 0.01
    clip_grad = 5
    batch_size = 1
    num_epochs = 3
    crf_lr = 0.01   # separate learning rate for the CRF layer
    lr = 0.01
    # Scheduler parameters
    lr_step = 3
    lr_gamma = 0.5
    betas = (0.9, 0.999)
    eps = 1e-6

    def __init__(self, **kwargs):
        """Override any class-level setting on this instance via kwargs."""
        for key, value in kwargs.items():
            setattr(self, key, value)
