# Poetry-generation model obtained by fine-tuning the ModelScope Chinese GPT-3 base model

from modelscope.msdatasets import MsDataset
from modelscope.trainers import build_trainer
from modelscope.msdatasets import MsDataset
from modelscope.utils.hub import read_config
from modelscope.metainfo import Metrics, Trainers
from datasets import Dataset
from modelscope.msdatasets import MsDataset

# Fetch the Chinese poetry corpus from the ModelScope hub. The GPT-3
# trainer expects its input text under the "src_txt" column, so the raw
# "text1" column is remapped on both splits.
dataset_dict = MsDataset.load('chinese-poetry-collection')
train_split = dataset_dict['train']
test_split = dataset_dict['test']
train_dataset = train_split.remap_columns({'text1': 'src_txt'})
eval_dataset = test_split.remap_columns({'text1': 'src_txt'})
print(eval_dataset)

# Checkpoint/log output directory and total number of training epochs.
tmp_dir = "./gpt3_poetry"
max_epochs = 10

# Number of linear-warmup steps before the inverse-sqrt decay takes over.
num_warmup_steps = 100

def noam_lambda(current_step: int, warmup_steps: int = num_warmup_steps) -> float:
    """Noam learning-rate multiplier (the "Attention Is All You Need" schedule).

    The factor rises linearly for the first ``warmup_steps`` updates and
    then decays proportionally to the inverse square root of the step,
    peaking at ``warmup_steps ** -0.5`` when the two terms cross.

    Args:
        current_step: Zero-based optimizer step, as supplied by
            ``torch.optim.lr_scheduler.LambdaLR``.
        warmup_steps: Length of the warmup phase. Defaults to the
            module-level ``num_warmup_steps`` so existing single-argument
            callers are unaffected.

    Returns:
        Multiplicative factor applied to the optimizer's base lr.
    """
    # LambdaLR passes a zero-based step; shift to one-based so the
    # decay term is finite at the very first call.
    step = current_step + 1
    return min(step ** (-0.5), step * warmup_steps ** (-1.5))

def cfg_modify_fn(cfg):
    """Adapt the base model's training configuration for this fine-tune.

    Installs a Noam-style ``LambdaLR`` schedule stepped per batch (not per
    epoch), an AdamW optimizer, and a reduced per-GPU batch size.

    Args:
        cfg: modelscope training config object; mutated in place.

    Returns:
        The same config object, for use as a ``cfg_modify_fn`` callback.
    """
    scheduler_cfg = {
        "type": "LambdaLR",
        "lr_lambda": noam_lambda,
        # Step the schedule every iteration rather than every epoch.
        "options": {"by_epoch": False},
    }
    optimizer_cfg = {"type": "AdamW", "lr": 3e-4}
    dataloader_cfg = {"batch_size_per_gpu": 8, "workers_per_gpu": 1}

    cfg.train.lr_scheduler = scheduler_cfg
    cfg.train.optimizer = optimizer_cfg
    cfg.train.dataloader = dataloader_cfg
    return cfg

# Assemble the trainer arguments.
# FIX: the evaluation-dataset keyword was previously misspelled as
# "eval_datase", so the eval split never reached the trainer under the
# expected name (silently ignored or rejected, depending on the trainer
# version).
kwargs = dict(
    model='damo/nlp_gpt3_text-generation_chinese-large',
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    max_epochs=max_epochs,
    work_dir=tmp_dir,
    cfg_modify_fn=cfg_modify_fn)

# Build the trainer and run fine-tuning.
trainer = build_trainer(
    name=Trainers.nlp_base_trainer, default_args=kwargs)
trainer.train()
