import tiktoken
from torch.utils.data import DataLoader
# GPT-2 byte-pair-encoding tokenizer; its vocabulary size must agree with
# GPT_CONFIG["vocab_size"] below (50257 for the "gpt2" encoding).
tokenizer = tiktoken.get_encoding("gpt2")

# Hyperparameters for a GPT-2 (124M-scale) style architecture.
GPT_CONFIG = dict(
    vocab_size=50257,     # BPE vocabulary size (matches the "gpt2" encoding)
    context_length=1024,  # maximum sequence length the model attends over
    emb_dim=768,          # token/positional embedding width
    n_heads=12,           # attention heads per transformer block
    n_layers=12,          # number of transformer blocks
    drop_rate=0.1,        # dropout probability
    qkv_bias=False,       # whether Q/K/V projections carry bias terms
)

from utils.GPTDataset import GPTDataset

def _load_split(csv_path):
    """Build a GPTDataset for one CSV split, capped at the model's context length."""
    return GPTDataset(
        csv_file=csv_path,
        tokenizer=tokenizer,
        max_length=GPT_CONFIG['context_length'],
    )


# One dataset per split; all share the tokenizer and context length above.
train_set = _load_split('data/train.csv')
test_set = _load_split('data/test.csv')
val_set = _load_split('data/validation.csv')

# Shared DataLoader settings for every split.
batch_size = 8
number_workers = 0


def _make_loader(split_dataset, shuffle, drop_last):
    """Wrap a split's dataset in a DataLoader using the shared batch settings."""
    return DataLoader(
        dataset=split_dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=number_workers,
        drop_last=drop_last,
    )


# Training shuffles and drops the ragged final batch so every optimizer step
# sees a full batch; evaluation keeps all examples, in file order.
train_loader = _make_loader(train_set, shuffle=True, drop_last=True)
test_loader = _make_loader(test_set, shuffle=False, drop_last=False)
val_loader = _make_loader(val_set, shuffle=False, drop_last=False)

