import torch

from ai import AiConstant


class GPTConfig:
    """Hyper-parameter and file-path configuration for GPT training and generation.

    Instantiated once at module import time as the module-level ``args``
    singleton; every setting is a plain instance attribute.
    """

    def __init__(self):
        # --- file paths (AiConstant.*_PATH values appear to end with '/' — TODO confirm) ---
        self.output_dir = AiConstant.OUTPUT_PATH + 'gpt/'
        self.data_path = AiConstant.DATA_PATH + 'gpt/data/composition'
        self.save_train_list_file_path = AiConstant.OUTPUT_PATH + 'gpt/train_list.pickle'
        self.save_model_path = self.output_dir + 'model'
        self.vocab_file_path = AiConstant.DATA_PATH + 'gpt/' + 'vocab/chinese_vocab.model'
        self.log_file_path = AiConstant.LOG_PATH + 'gpt/'

        # --- data windowing ---
        self.window_size = 200  # maximum length of each sentence (sliding-window width)
        self.window_step = 200  # stride of the sliding window
        self.max_input_size = 200  # maximum input sequence length

        # --- training ---
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.num_workers = 1  # number of data-loading worker processes
        self.batch_size = 12
        self.gradient_accumulation_steps = 4  # accumulate gradients over 4 batches before each update, to reduce GPU memory usage
        self.epochs = 10

        # --- optimizer / schedule ---
        self.eps = 1e-9  # AdamW numerical-stability epsilon (the original comment called it a decay rate, which was incorrect)
        self.lr = 5e-5
        self.log_steps = 10
        self.warmup_steps = 4000  # warm-up steps during which the learning rate ramps up
        self.eod_id = -1  # end-of-document token id; -1 presumably means "filled in later from the tokenizer" — verify against caller
        self.pad_id = -1  # padding token id; -1 presumably means "filled in later from the tokenizer" — verify against caller
        # FIX: output_dir already ends with '/', so the previous '/model/...'
        # and '/config/...' literals produced a redundant double slash
        # ('gpt//model/...'); now consistent with save_model_path above.
        self.pretrained_model = self.output_dir + 'model/zuowen_epoch40'
        self.model_config = self.output_dir + 'config/cpm-small.json'  # model config used when training from scratch
        self.seed = 1234
        self.cuda = torch.cuda.is_available()
        self.ignore_index = -100  # label value ignored by the loss (CrossEntropyLoss default ignore_index)
        self.max_grad_norm = 1  # gradient-clipping threshold

        # --- generation ---
        self.context_len = 100  # context length referenced during generation
        self.temperature = 1
        self.topk = 10
        self.topp = 0.9


# Module-level configuration singleton; the rest of the package imports `args`.
args = GPTConfig()

# Dedicated training logger. NOTE(review): the log path is rebuilt from
# AiConstant.LOG_PATH here rather than reusing args.log_file_path — presumably
# intentional (file vs. directory), but worth confirming they stay in sync.
gpt_logger = AiConstant.AI_LOGGER(AiConstant.LOG_PATH + "gpt/train.log")
