File size: 1,798 Bytes
a1c81a0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
import json
import os

import torch
from datasets import Dataset, load_dataset
from transformers import (
    DataCollatorForSeq2Seq,
    T5ForConditionalGeneration,
    T5TokenizerFast,
    Trainer,
    TrainingArguments,
)

# === Load your custom tokenizer ===
# NOTE(review): loads a locally trained tokenizer from the
# "minicoderx-tokenizer" directory; the script fails here if that
# directory (or hub repo of the same name) does not exist.
tokenizer = T5TokenizerFast.from_pretrained("minicoderx-tokenizer")

# === Load or create dataset ===
def load_jsonl(path):
    """Read a JSON-Lines file and return it as a `datasets.Dataset`.

    Each non-empty line must be a JSON object with string fields
    "input" and "output". Blank lines (e.g. a trailing newline) are
    skipped instead of crashing `json.loads`.
    """
    # Explicit encoding: the platform default may not be UTF-8 and would
    # break on non-ASCII training examples.
    with open(path, encoding="utf-8") as f:
        data = [json.loads(line) for line in f if line.strip()]
    return Dataset.from_dict({
        "input": [x["input"] for x in data],
        "output": [x["output"] for x in data],
    })

dataset = load_jsonl("data/train.jsonl")

# === Tokenize dataset ===
def tokenize(batch):
    """Encode the "input" column into fixed-length (128) token ids."""
    encoded = tokenizer(
        batch["input"],
        padding="max_length",
        truncation=True,
        max_length=128,
    )
    return encoded

def tokenize_labels(batch):
    """Tokenize the "output" column into `labels` for seq2seq training.

    Pad positions are replaced with -100, the ignore index used by the
    cross-entropy loss inside `transformers`. Without this masking the
    model is trained to emit padding tokens and the loss is diluted by
    the (mostly-padding) tail of every 128-token label sequence.
    """
    labels = tokenizer(batch["output"], padding="max_length", truncation=True, max_length=128)
    pad_id = tokenizer.pad_token_id
    batch["labels"] = [
        [tok if tok != pad_id else -100 for tok in seq]
        for seq in labels["input_ids"]
    ]
    return batch

# Apply both tokenization passes; each map() adds its new columns.
dataset = dataset.map(tokenize).map(tokenize_labels)

# === Load pre-trained T5-small ===
model = T5ForConditionalGeneration.from_pretrained("t5-small")

# === Training configuration ===
training_args = TrainingArguments(
    output_dir="minicoderx-model",
    per_device_train_batch_size=4,
    num_train_epochs=3,
    logging_steps=10,
    save_strategy="epoch",          # one checkpoint per epoch
    evaluation_strategy="no",       # NOTE(review): renamed `eval_strategy` in transformers >= 4.46 — confirm installed version
    save_total_limit=2,             # keep only the two most recent checkpoints
    # fp16 requires CUDA; hard-coding True raises at runtime on CPU-only
    # machines, so enable it only when a GPU is actually present.
    fp16=torch.cuda.is_available(),
    overwrite_output_dir=True,
)

# Dynamic collation for encoder-decoder batches; by default it pads
# label sequences with -100 so padding is ignored by the loss.
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=dataset,
    data_collator=data_collator,
    tokenizer=tokenizer,
)

# === Train ===
trainer.train()

# === Save model ===
# Save both model weights and tokenizer so the output directory is a
# self-contained, reloadable checkpoint.
trainer.save_model("minicoderx-model")
tokenizer.save_pretrained("minicoderx-model")

print("Training complete and model saved.")