import pandas as pd
import torch
from torch.utils.data import Dataset


class GPTDataset(Dataset):
    """Text-classification dataset backed by a CSV file.

    Each row's ``Text`` column is tokenized, then right-padded (and optionally
    truncated) to a uniform length so items can be batched directly.

    Args:
        csv_file: Path or buffer readable by ``pandas.read_csv``; must contain
            ``Text`` and ``Label`` columns.
        tokenizer: Object exposing ``encode(str) -> list[int]``.
        max_length: If given, every sequence is truncated to this length.
            If ``None``, the length of the longest encoded sequence is used
            and nothing is truncated.
        pad_token_id: Token id used for right-padding. The default 50256
            matches GPT-2's ``<|endoftext|>`` token.
    """

    def __init__(self, csv_file, tokenizer, max_length=None, pad_token_id=50256):
        self.data = pd.read_csv(csv_file)
        self.encoded_text = [tokenizer.encode(text) for text in self.data['Text']]
        if max_length is None:
            # No explicit cap: pad everything to the longest sequence seen.
            self.max_length = self._get_max_length()
        else:
            # Explicit cap: truncate first so padding below is never negative.
            self.max_length = max_length
            self.encoded_text = [
                text[:self.max_length] for text in self.encoded_text
            ]
        # Right-pad every sequence to exactly self.max_length tokens.
        self.encoded_text = [
            text + [pad_token_id] * (self.max_length - len(text))
            for text in self.encoded_text
        ]

    def __getitem__(self, item):
        """Return ``(input_ids, label)`` as a pair of tensors for row *item*."""
        return (
            torch.tensor(self.encoded_text[item]),
            torch.tensor(self.data.iloc[item]['Label'])
        )

    def __len__(self):
        """Number of rows in the dataset."""
        return len(self.encoded_text)

    def _get_max_length(self):
        """Length of the longest encoded sequence; 0 if the dataset is empty."""
        # max(..., default=0) keeps the original behavior for an empty CSV,
        # where the old accumulator loop also yielded 0.
        return max((len(text) for text in self.encoded_text), default=0)
