# -*- coding: utf-8 -*-
# time: 2025/4/8 17:30
# file: ft_demo1.py
# author: hanson

import os.path as osp
from modelscope.trainers import build_trainer
from modelscope.msdatasets import MsDataset
from modelscope.utils.hub import read_config
from modelscope.metainfo import Metrics
from modelscope.utils.constant import Tasks
from modelscope.metainfo import Trainers
# 登陆
from modelscope.hub.api import HubApi

# Log in to the ModelScope hub. YOUR_KEY must be replaced with a valid
# ModelScope SDK access token before running.
api = HubApi()
api.login(YOUR_KEY)

# Backbone model and dataset to fine-tune on. YOUR_DATASET_NAME and
# YOUR_NAMESPACE are placeholders that must be filled in by the user.
model_id = 'damo/nlp_structbert_backbone_base_std'
dataset_id = YOUR_DATASET_NAME
train_dataset = MsDataset.load(dataset_id, namespace=YOUR_NAMESPACE, subset_name='default',
                               split='train').to_hf_dataset()
eval_dataset = MsDataset.load(dataset_id, namespace=YOUR_NAMESPACE, subset_name='default',
                              split='validation').to_hf_dataset()

# Drop samples whose text column ('order') or label is missing.
# NOTE: use `is not None` rather than `!= None` (PEP 8) — it is also robust
# against objects that override __eq__.
train_dataset = train_dataset.filter(lambda x: x["label"] is not None and x["order"] is not None)
eval_dataset = eval_dataset.filter(lambda x: x["label"] is not None and x["order"] is not None)

# Working directory where training logs and checkpoints are saved.
WORK_DIR = 'workspace'


# 修改配置文件
def cfg_modify_fn(cfg):
    """Adjust the model's default training config for this fine-tuning run.

    Passed to the trainer as ``cfg_modify_fn``; the trainer calls it with the
    config loaded from the model card and uses the returned (mutated) config.

    Args:
        cfg: the ModelScope ``Config`` object read from the model's
            configuration file.

    Returns:
        The same ``cfg`` object, modified in place.
    """
    cfg.task = Tasks.sentiment_classification
    cfg['preprocessor'] = {'type': 'sen-cls-tokenizer'}
    cfg['dataset'] = {
        'train': {
            # The label values present in the dataset.
            'labels': ['0', '1', '2', '3'],

            # CSV column holding the training text.
            'first_sequence': 'order',

            # CSV column holding the label.
            'label': 'label',
        }
    }
    # Batch sizes for training and evaluation.
    cfg.train.dataloader.batch_size_per_gpu = 32
    cfg.evaluation.dataloader.batch_size_per_gpu = 32

    # Number of training epochs.
    cfg.train.max_epochs = 5

    # LR scheduler; see torch's schedulers for the available types/fields.
    cfg.train.lr_scheduler = {'type': 'StepLR', 'step_size': 2,
                              'options': {'warmup': {'type': 'LinearWarmup', 'warmup_iters': 2}}}

    # Training hooks. (Original code had a duplicated assignment
    # `cfg.train.hooks = cfg.train.hooks = [...]`; fixed to a single one.)
    cfg.train.hooks = [
        # Log every `interval` iterations.
        {
            'type': 'TextLoggerHook',
            'interval': 100
        },

        # Run evaluation; 'by_epoch': True means once per epoch.
        {
            'type': 'EvaluationHook',
            'by_epoch': True
        },

        # Save a checkpoint; 'by_epoch': True means once per epoch.
        {
            'type': 'CheckpointHook',
            'by_epoch': True
        }]

    # Initial learning rate.
    cfg.train.optimizer.lr = 2e-6

    # Evaluation metric; Metrics.seq_cls_metric reports classification accuracy.
    cfg.evaluation.metrics = [Metrics.seq_cls_metric]
    cfg.evaluation.dataloader.shuffle = True

    return cfg


# 构建trainer参数
# Assemble the trainer arguments.
kwargs = dict(
    model=model_id,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    work_dir=WORK_DIR,
    cfg_modify_fn=cfg_modify_fn,
)
# Build the trainer.
trainer = build_trainer(name='nlp-base-trainer', default_args=kwargs)
# Start training.
print('===============================================================')
print('pre-trained model loaded, training started:')
print('===============================================================')
trainer.train()
print('===============================================================')
print('train success.')
print('===============================================================')
# Evaluate every per-epoch checkpoint saved by CheckpointHook.
# FIX: `max_epochs` was previously undefined here (NameError); it must match
# the value set via cfg.train.max_epochs in cfg_modify_fn.
max_epochs = 5
for i in range(max_epochs):
    eval_results = trainer.evaluate(f'{WORK_DIR}/epoch_{i + 1}.pth')
    # FIX: label the printed epoch consistently with the checkpoint evaluated
    # (previously printed `epoch {i}` while loading epoch_{i + 1}.pth).
    print(f'epoch {i + 1} evaluation result:')
    print(eval_results)
print('===============================================================')
print('evaluate success')
print('===============================================================')