# -*- coding: utf-8 -*-
"""
@Env 
@Time 2024/9/29 下午6:26
@Author yzpang
@Function: 
"""
from transformers import BertTokenizer, BertForSequenceClassification, Trainer, TrainingArguments
from modelserver.configs.model_config import DEFAULT_LR, DEFAULT_EPOCHS, DEFAULT_BATCH_SIZE, DEFAULT_DEVICE
from modelserver.train.file_util import load_file, parse_labels
from modelserver.train.custom_dataset import get_dataset
import torch


class TrainTransformer:
    """Fine-tune a BERT sequence-classification model with the HF ``Trainer``.

    Args:
        model_path: Path or hub id of the pretrained BERT checkpoint; used
            for both the tokenizer and the classification model.
        **kwargs: Optional configuration:
            output_dir: Directory for checkpoints and the final model/tokenizer.
            learning_rate / epochs / batch_size / device: training
                hyper-parameters; each falls back to its ``DEFAULT_*`` value
                when omitted or passed as ``None``. ``device='auto'`` selects
                CUDA when available, otherwise CPU.
            label_path: Label file parsed by ``parse_labels`` into a
                label -> id mapping.
            train_path: Training data file read by ``load_file``.
            eval_path: Optional evaluation data file; when ``None`` no
                evaluation is performed.
    """

    def __init__(self, model_path, **kwargs):
        self.model_path = model_path
        self.tokenizer = BertTokenizer.from_pretrained(model_path)
        self.output_dir = kwargs.get('output_dir')
        # Fall back to project defaults only when the option is missing or
        # explicitly None (a falsy-but-valid value like 0 must be kept).
        self.learning_rate = DEFAULT_LR if kwargs.get('learning_rate') is None else kwargs.get('learning_rate')
        self.epochs = DEFAULT_EPOCHS if kwargs.get('epochs') is None else kwargs.get('epochs')
        self.batch_size = DEFAULT_BATCH_SIZE if kwargs.get('batch_size') is None else kwargs.get('batch_size')
        self.device = DEFAULT_DEVICE if kwargs.get('device') is None else kwargs.get('device')
        if 'auto' == self.device:
            self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.label_path = kwargs.get('label_path')
        self.train_path = kwargs.get('train_path')
        self.eval_path = kwargs.get('eval_path')
        self.label_dict = self.get_label_dict(self.label_path)

    def train(self):
        """Build the datasets and run training (plus evaluation when configured)."""
        train_dataset = self.get_data_loader(self.label_dict, self.train_path, self.tokenizer)
        if self.eval_path is not None:
            eval_dataset = self.get_data_loader(self.label_dict, self.eval_path, self.tokenizer)
        else:
            eval_dataset = None
        self._train(train_dataset, eval_dataset)

    def _train(self, train_data_loader, eval_data_loader=None):
        """Run the HF ``Trainer`` and save the model and tokenizer.

        :param train_data_loader: training dataset (as built by ``get_data_loader``)
        :param eval_data_loader: optional evaluation dataset; when ``None``,
            per-epoch evaluation is disabled and ``evaluate()`` is skipped.
        """
        # Build the classification head sized to the label vocabulary.
        model = BertForSequenceClassification.from_pretrained(self.model_path, num_labels=len(self.label_dict))
        model.to(self.device)
        training_args = TrainingArguments(
            output_dir=self.output_dir,
            num_train_epochs=self.epochs,
            learning_rate=self.learning_rate,
            per_device_train_batch_size=self.batch_size,
            per_device_eval_batch_size=self.batch_size,
            # Trainer raises at construction if the evaluation strategy is not
            # 'no' while eval_dataset is None, so only enable per-epoch
            # evaluation when an eval dataset actually exists.
            evaluation_strategy='epoch' if eval_data_loader is not None else 'no',
            logging_dir='./logs',
        )

        trainer = Trainer(
            model=model,
            args=training_args,
            train_dataset=train_data_loader,
            eval_dataset=eval_data_loader,
        )
        # Train the model.
        trainer.train()
        # Evaluate only when an eval dataset was provided; calling
        # trainer.evaluate() with no eval dataset raises a ValueError.
        if eval_data_loader is not None:
            trainer.evaluate()
        # Persist the fine-tuned model and its tokenizer together.
        model.save_pretrained(self.output_dir)
        self.tokenizer.save_pretrained(self.output_dir)

    def get_label_dict(self, label_path):
        """Return the label -> id mapping parsed from ``label_path``."""
        _, labels2id = parse_labels(label_path)
        return labels2id

    def get_data_loader(self, label_dict, data_path, tokenizer):
        """Build a dataset from a data file.

        NOTE(review): despite the name, this returns a dataset (from
        ``get_dataset``), not a DataLoader; the name is kept for
        backward compatibility.

        :param label_dict: label -> id mapping
        :param data_path: path to the data file
        :param tokenizer: tokenizer used to encode the texts
        :return: dataset suitable for the HF ``Trainer``
        """
        data_texts, data_labels = load_file(data_path, label_dict)
        data_dataset = get_dataset(data_texts, data_labels, tokenizer)
        return data_dataset


if __name__ == '__main__':
    # Pretrained checkpoint plus the task data files.
    pretrained_dir = r"E:\AI\models\BERT\bert-base-chinese"
    data_files = {
        'train_path': r"data/train.txt",
        'eval_path': r"data/eval.txt",
        'label_path': r"data/label.txt",
    }

    # Configure and launch fine-tuning.
    task = TrainTransformer(
        model_path=pretrained_dir,
        output_dir='checkpoint',
        learning_rate=1e-5,
        epochs=5,
        batch_size=8,
        **data_files,
    )
    task.train()
