from transformers import CpmTokenizer, GPT2LMHeadModel, GPT2Config

import ai.utils.utils_model
from project.enchanter_gpt.model import GPTModel
from project.enchanter_gpt.train import train_model
from project.enchanter_gpt.dataset import load_dataset
from project.enchanter_gpt import gpt_config

from ai.utils import utils_file

if __name__ == '__main__':
    # Training entry point: build tokenizer + GPT-2 model from the project
    # config, then hand off to the project's dataset loader and trainer.
    args = gpt_config.args
    logger = gpt_config.gpt_logger

    # Initialize the tokenizer from the project vocabulary file
    tokenizer = CpmTokenizer(vocab_file=args.vocab_file_path)
    args.eod_id = tokenizer.convert_tokens_to_ids("<eod>")  # end-of-document token id
    args.pad_id = tokenizer.pad_token_id
    utils_file.makedir(args.output_dir)  # ensure the output directory exists

    # Set the random seed for reproducibility (CPU and, if enabled, CUDA)
    ai.utils.utils_model.set_random_seed(args.seed, args.cuda)

    # Create the model: resume from a pretrained checkpoint when one is
    # configured, otherwise initialize fresh weights from a JSON config.
    if args.pretrained_model:
        model = GPT2LMHeadModel.from_pretrained(args.pretrained_model)
    else:
        model_config = GPT2Config.from_json_file(args.model_config)
        model = GPT2LMHeadModel(config=model_config)
    model = model.to(args.device)
    logger.info('model config:\n{}'.format(model.config.to_json_string()))
    # Explicit check instead of `assert`: asserts are stripped under
    # `python -O`, which would silently skip this vocab-size safety check.
    if model.config.vocab_size != tokenizer.vocab_size:
        raise ValueError(
            'model vocab_size ({}) does not match tokenizer vocab_size ({})'.format(
                model.config.vocab_size, tokenizer.vocab_size))

    # Report the total number of model parameters
    num_parameters = utils_file.calculate_parameters_num(model)
    logger.info('number of model parameters: {}'.format(num_parameters))

    # Log the full runtime configuration for traceability
    logger.info('args:{}'.format(ai.utils.utils_model.get_attributes_of_object_to_dict(args)))

    # Load the training data and start training
    train_dataset = load_dataset(logger, args)
    train_model(model, train_dataset, logger, args)
