import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader


class PretrainDataset(Dataset):
    """Masked-token pretraining dataset over a CSV of integer token ids.

    The CSV must contain a ``text`` column whose values are whitespace-separated
    integer token ids.  Each sample is prefixed with a CLS token (7999),
    truncated/right-padded with PAD (7998) to exactly ``max_length``, and ~15%
    of non-special tokens are replaced with the MASK token (7997) in
    ``input_ids`` while ``labels`` keeps the unmasked sequence.
    """

    def __init__(self, csv_file, max_length=4096, replication_factor=2):
        """Load the CSV and replicate each sample ``replication_factor`` times.

        Args:
            csv_file: Path to a CSV with a ``text`` column of token-id strings.
            max_length: Fixed output sequence length (CLS included).
            replication_factor: How many copies of each row to expose
                (default 2, matching the previous hard-coded behavior).
        """
        df = pd.read_csv(csv_file, dtype={'text': str})

        # Drop missing values so a NaN row cannot crash __getitem__.
        self.texts = df['text'].dropna().tolist()
        self.max_length = max_length

        # Replicate every sample replication_factor times.
        self.extended_texts = [
            text for text in self.texts for _ in range(replication_factor)
        ]

    def __len__(self):
        # Length of the replicated sample list.
        return len(self.extended_texts)

    def __getitem__(self, idx):
        """Return ``{'input_ids', 'labels'}`` tensors of length ``max_length``."""
        raw = self.extended_texts[idx]

        # Parse whitespace-separated integer token ids.
        tokens = [int(tok) for tok in raw.split()]

        # Truncate so CLS + tokens never exceeds max_length (the previous code
        # only padded; over-long rows produced sequences longer than
        # max_length), then prepend CLS (7999) and right-pad with PAD (7998).
        tokens = tokens[:self.max_length - 1]
        tokens = [7999] + tokens + [7998] * (self.max_length - len(tokens) - 1)

        labels = tokens.copy()

        # Mask ~15% of positions, never masking the CLS/PAD special tokens.
        token_tensor = torch.tensor(tokens)
        rand = torch.rand(len(tokens))
        mask_arr = (rand < 0.15) & (token_tensor != 7999) & (token_tensor != 7998)

        for sel in torch.flatten(mask_arr.nonzero()).tolist():
            tokens[sel] = 7997  # MASK token

        return {
            'input_ids': torch.tensor(tokens),
            'labels': torch.tensor(labels)
        }


class ClassificationDataset(Dataset):
    """Sequence-classification dataset over a TSV of integer token ids.

    The TSV must contain a ``text`` column (whitespace-separated integer token
    ids) and a ``label`` column.  Rows missing either field are dropped
    *together* so texts and labels stay aligned.  Each sample is prefixed with
    CLS (7999) and truncated/right-padded with PAD (7998) to ``max_length``.
    """

    def __init__(self, csv_file, max_length=4096):
        """Load the TSV at ``csv_file`` and keep only fully-populated rows."""
        df = pd.read_csv(csv_file, sep='\t')

        # Drop rows missing text OR label jointly.  Dropping NaNs from each
        # column independently (as before) could silently misalign the two
        # lists whenever only one of the fields was missing.
        df = df.dropna(subset=['text', 'label'])
        self.texts = df['text'].tolist()
        self.labels = df['label'].tolist()
        self.max_length = max_length

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        """Return ``{'input_ids', 'labels'}`` for the row at ``idx``."""
        tokens = [int(tok) for tok in self.texts[idx].split()]

        # Truncate so CLS + tokens never exceeds max_length (previously a
        # long row produced a negative pad count and an over-long sequence),
        # then prepend CLS (7999) and right-pad with PAD (7998).
        tokens = tokens[:self.max_length - 1]
        tokens = [7999] + tokens + [7998] * (self.max_length - len(tokens) - 1)

        return {
            'input_ids': torch.tensor(tokens),
            'labels': torch.tensor(self.labels[idx])
        }


class PredictionDataset(Dataset):
    """Inference dataset that splits long token sequences into fixed segments.

    Each ``text`` row (whitespace-separated integer token ids, read from a
    TSV) is cut into up to ``max_segments`` segments of exactly ``max_length``
    tokens: a CLS token (7999) first, PAD (7998) to fill.  ``__getitem__``
    returns a ``(max_segments, max_length)`` long tensor plus the count of
    real (non-padding) segments.
    """

    def __init__(self, csv_file, max_length=4096, max_segments=20):
        self.max_length = max_length
        self.max_segments = max_segments
        df = pd.read_csv(csv_file, sep='\t')
        self.texts = df['text'].dropna().tolist()

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        """Return ``(segments_tensor, actual_segments)`` for row ``idx``."""
        tokens = [int(tok) for tok in self.texts[idx].split()]

        segments = []
        segment_size = self.max_length - 1  # leave room for the leading CLS

        if len(tokens) < self.max_length:
            # Fits in a single segment: CLS + tokens + PAD up to max_length.
            segments.append(
                [7999] + tokens + [7998] * (self.max_length - len(tokens) - 1)
            )
        else:
            # Split into consecutive segment_size chunks, capped at
            # max_segments.
            for i in range(0, len(tokens), segment_size):
                if len(segments) >= self.max_segments:
                    break
                chunk = tokens[i:i + segment_size]
                if len(chunk) == segment_size:
                    segments.append([7999] + chunk)
                else:
                    # Trailing partial chunk: take the LAST segment_size
                    # tokens (overlapping the previous segment) and pad.
                    # The previous code built this segment but never appended
                    # it, silently dropping the tail of the document.
                    tail = [7999] + tokens[-segment_size:]
                    segments.append(tail + [7998] * (self.max_length - len(tail)))

        actual_segments = len(segments)

        # Pad with empty segments (CLS followed by PAD) up to max_segments.
        while len(segments) < self.max_segments:
            segments.append([7999] + [7998] * (self.max_length - 1))

        # Stack all segments into one (max_segments, max_length) tensor.
        segments_tensor = torch.tensor(segments, dtype=torch.long)

        return segments_tensor, actual_segments
