# https://blog.csdn.net/weixin_44826203/article/details/126295253
import json
import torch
from torch.utils.data import dataset
# from datasets import Dataset
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, DataCollatorForSeq2Seq, Seq2SeqTrainer, \
    Seq2SeqTrainingArguments

# Local path to the pretrained Mengzi-T5 checkpoint (Windows layout);
# adjust this path when running in a different environment.
pretrained_model = r"D:\codes\nlp_about\pretrained_model\Langboat_mengzi-t5-base"

# NOTE: tokenizer and model are loaded eagerly at import time from the
# local checkpoint directory — importing this module requires the files
# to exist on disk.
tokenizer = AutoTokenizer.from_pretrained(pretrained_model)

model = AutoModelForSeq2SeqLM.from_pretrained(pretrained_model)


class Seq2SeqDataset(dataset.Dataset):
    """Line-delimited JSON dataset for seq2seq (content -> title) training.

    Each non-blank line of the training file is a JSON object expected to
    carry "content" (source text) and "title" (target text) keys; missing
    keys fall back to the empty string.
    """

    def __init__(self, train_json_path, tokenizer, content_len=32, title_len=16):
        # Bug fix: this list was previously named `self.source_ids`, but
        # `_load_data` and `__len__` both use `self.content`, so constructing
        # the dataset raised AttributeError.
        self.content = []
        self.title = []
        self.tokenizer = tokenizer
        self.content_len = content_len  # max token length of the source text
        self.title_len = title_len      # max token length of the target title

        self._load_data(train_json_path)

    def _load_data(self, train_json_path):
        """Read the JSON-lines file at `train_json_path`, skipping blank lines."""
        with open(train_json_path, "r", encoding="utf-8") as f:
            lines = f.readlines()
        for line in lines:
            if not line.strip():
                continue
            _data = json.loads(line.strip())
            # .get with "" default: a record missing a key cannot crash loading.
            self.content.append(_data.get("content", ""))
            self.title.append(_data.get("title", ""))

    def __len__(self):
        return len(self.content)

    def __getitem__(self, item):
        """Tokenize one example on the fly.

        Returns the tokenizer's encoding of the source text (input_ids,
        attention_mask, ...) with a "labels" entry holding the tokenized
        title — the feature layout DataCollatorForSeq2Seq expects.
        (Previously this was an empty stub that returned None.)
        """
        inputs = self.tokenizer(
            self.content[item], max_length=self.content_len, truncation=True
        )
        labels = self.tokenizer(
            self.title[item], max_length=self.title_len, truncation=True
        )
        inputs["labels"] = labels["input_ids"]
        return inputs


def collect_fn(batch):
    """Collate a batch of feature dicts into padded LongTensors.

    `batch` is a list of dicts with "input_ids", "attention_mask" and
    "labels" list values (the layout a seq2seq dataset item provides).
    Inputs/masks are right-padded with 0; labels are padded with -100 so
    padded positions are ignored by the cross-entropy loss.
    (Previously this was an empty stub that returned None, which would
    crash any DataLoader using it as collate_fn.)

    NOTE(review): assumes pad token id 0 — true for T5-style tokenizers;
    confirm for other checkpoints.
    """
    def _pad(seqs, pad_value):
        # Right-pad every sequence to the batch-wide maximum length.
        width = max(len(s) for s in seqs)
        return [list(s) + [pad_value] * (width - len(s)) for s in seqs]

    input_ids = _pad([ex["input_ids"] for ex in batch], 0)
    attention_mask = _pad([ex["attention_mask"] for ex in batch], 0)
    labels = _pad([ex["labels"] for ex in batch], -100)
    return {
        "input_ids": torch.tensor(input_ids, dtype=torch.long),
        "attention_mask": torch.tensor(attention_mask, dtype=torch.long),
        "labels": torch.tensor(labels, dtype=torch.long),
    }
