import os

import numpy as np
import pandas as pd
from Config import Paths, Config
from Utils import sep, LOGGER
from transformers import AutoTokenizer
import matplotlib.pyplot as plt
from tqdm.auto import tqdm
import torch
from torch.utils.data import DataLoader, Dataset
from sklearn.model_selection import StratifiedKFold, train_test_split

# Module-level tokenizer, loaded once at import time from the serialized
# copy under Paths.OUTPUT_DIR (local path, so no hub download is needed).
tokenizer = AutoTokenizer.from_pretrained(os.path.join(Paths.OUTPUT_DIR, "tokenizer")) # load from local directory

def prepare_input(cfg, text, tokenizer):
    """Tokenize one text into fixed-length ``torch.long`` tensors.

    Encodes ``text`` with special tokens added, padded and truncated to
    ``cfg.MAX_LEN``, then converts every field of the encoding
    (input_ids, attention_mask, ...) from a Python list to a tensor.
    """
    encoded = tokenizer.encode_plus(
        text,
        return_tensors=None,      # keep plain Python lists; tensors are built below
        add_special_tokens=True,  # add [CLS] / [SEP]-style special tokens
        max_length=cfg.MAX_LEN,
        padding='max_length',     # pad every sample out to MAX_LEN
        truncation=True           # clip anything longer than MAX_LEN
    )
    for field in list(encoded.keys()):
        encoded[field] = torch.tensor(encoded[field], dtype=torch.long)
    return encoded



def collate(inputs):
    """Trim a padded batch down to its longest real sequence.

    Computes each sample's true token count from ``attention_mask``,
    takes the batch-wide maximum, and slices every tensor in the batch
    to that length — dropping columns that are padding for all samples.
    """
    longest = int(inputs["attention_mask"].sum(axis=1).max())
    for key in inputs:
        inputs[key] = inputs[key][:, :longest]
    return inputs

class Dataset(Dataset):
    """Torch dataset yielding tokenized text, a float label, and a sample id.

    NOTE(review): the class name shadows the imported
    ``torch.utils.data.Dataset`` base class; kept as-is because callers
    refer to it by this name.
    """

    def __init__(self, cfg, df, tokenizer):
        self.cfg = cfg
        self.tokenizer = tokenizer
        self.texts = df['text'].values        # raw text strings
        self.labels = df['generated'].values  # target values
        self.text_ids = df['id'].values       # per-sample identifiers

    def __len__(self):
        """Number of samples in the dataset."""
        return len(self.texts)

    def __getitem__(self, item):
        """Fetch one sample as a dict of encoded inputs, label tensor, and id."""
        sample = {
            "inputs": prepare_input(self.cfg, self.texts[item], self.tokenizer),
            "labels": torch.tensor(self.labels[item], dtype=torch.float),
            "ids": self.text_ids[item],
        }
        return sample

