from datasets import load_dataset

# Load the first 5,000 rows of the Mahabharata text dataset
dataset = load_dataset("LeroyDyer/Mahabharata", split="train[:5000]")
# `tokenizer` is assumed to have been created earlier (e.g. when the model was loaded)
EOS_TOKEN = tokenizer.eos_token
def formatting_func(example):
    max_seq_length = 4098  # maximum chunk length (in characters, not tokens)
    # Append the EOS token so the model learns where a passage ends
    text = example["Text"] + EOS_TOKEN
    # Split the passage into fixed-size character chunks
    chunks = [text[i:i + max_seq_length] for i in range(0, len(text), max_seq_length)]
    # Return one record per chunk, keeping the original "Text" column name
    formatted_examples = [{"Text": chunk} for chunk in chunks]
    return formatted_examples
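
# Because formatting_func returns a list of records for each source row, it cannot
# be passed straight to a one-row-in / one-row-out dataset.map call. A minimal
# sketch of one way to apply it, assuming the chunked records should become a new
# datasets.Dataset used in place of the original `dataset`:
from datasets import Dataset

chunked_records = [record for example in dataset for record in formatting_func(example)]
chunked_dataset = Dataset.from_list(chunked_records)
print(len(dataset), "source rows ->", len(chunked_dataset), "chunks")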