rtferraz committed on
Commit 46a6d37 · verified · 1 Parent(s): 256963c

Add finetune.py — finetune_domain_model (HF Trainer Pattern A, auto tabular_features passthrough)

src/domain_tokenizer/training/finetune.py ADDED
@@ -0,0 +1,69 @@
+"""
+Fine-tuning function for JointFusionModel.
+
+Uses HF Trainer Pattern A — Trainer inspects JointFusionModel.forward() signature,
+sees tabular_features, and auto-passes it from dataset. No Trainer subclass needed.
+"""
+
+import logging
+from typing import Optional
+
+from torch.utils.data import Dataset as TorchDataset
+from transformers import Trainer, TrainingArguments
+
+logger = logging.getLogger(__name__)
+
+
+def finetune_domain_model(
+    model, train_dataset: TorchDataset, eval_dataset: Optional[TorchDataset] = None,
+    output_dir: str = "./domain_finetune_checkpoints", hub_model_id: Optional[str] = None,
+    num_epochs: int = 5, per_device_batch_size: int = 32, gradient_accumulation_steps: int = 1,
+    learning_rate: float = 1e-4, lr_scheduler_type: str = "cosine",
+    warmup_steps: int = 100, weight_decay: float = 0.01, max_grad_norm: float = 1.0,
+    bf16: bool = False, fp16: bool = False, logging_steps: int = 50,
+    save_steps: int = 500, save_strategy: str = "steps", eval_steps: int = 500,
+    save_total_limit: int = 3, dataloader_num_workers: int = 4, report_to: str = "none",
+    run_name: Optional[str] = None, seed: int = 42, gradient_checkpointing: bool = False,
+    resume_from_checkpoint: Optional[str] = None, **extra_training_args,
+) -> Trainer:
+    """Fine-tune a JointFusionModel with HF Trainer.
+
+    The Trainer auto-passes tabular_features from dataset to model because
+    it inspects forward() signature (Pattern A — no subclass needed).
+
+    Dataset must yield: {input_ids, attention_mask, tabular_features, labels}.
+    """
+    push_to_hub = hub_model_id is not None
+
+    training_args = TrainingArguments(
+        output_dir=output_dir, num_train_epochs=num_epochs,
+        per_device_train_batch_size=per_device_batch_size,
+        per_device_eval_batch_size=per_device_batch_size,
+        gradient_accumulation_steps=gradient_accumulation_steps,
+        learning_rate=learning_rate, lr_scheduler_type=lr_scheduler_type,
+        warmup_steps=warmup_steps, weight_decay=weight_decay, max_grad_norm=max_grad_norm,
+        bf16=bf16, fp16=fp16,
+        logging_strategy="steps", logging_steps=logging_steps,
+        logging_first_step=True, disable_tqdm=True,
+        eval_strategy="steps" if eval_dataset else "no",
+        eval_steps=eval_steps if eval_dataset else None,
+        save_strategy=save_strategy, save_steps=save_steps, save_total_limit=save_total_limit,
+        push_to_hub=push_to_hub, hub_model_id=hub_model_id if push_to_hub else None,
+        dataloader_num_workers=dataloader_num_workers, report_to=report_to,
+        run_name=run_name, seed=seed, gradient_checkpointing=gradient_checkpointing,
+        remove_unused_columns=True, **extra_training_args,
+    )
+
+    n_params = sum(p.numel() for p in model.parameters())
+    logger.info(f"=== Domain Fine-Tuning (Joint Fusion) ===")
+    logger.info(f"  Model params: {n_params:,}, Train samples: {len(train_dataset):,}")
+    logger.info(f"  Batch: {per_device_batch_size}x{gradient_accumulation_steps}, "
+                f"Epochs: {num_epochs}, LR: {learning_rate} ({lr_scheduler_type})")
+
+    trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset)
+    trainer.train(resume_from_checkpoint=resume_from_checkpoint)
+
+    if push_to_hub:
+        trainer.push_to_hub()
+
+    return trainer
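
A minimal usage sketch of the added function, for context. It is illustrative only: the dataset class, the import path, and all variable names (model, train_ds, eval_ds) are assumptions, not part of this commit. Pattern A works because JointFusionModel.forward() already declares tabular_features as a parameter, so the stock Trainer forwards that key straight from each batch without a subclass.

    # Hypothetical usage sketch (not part of the commit). Assumes a JointFusionModel
    # instance whose forward() accepts tabular_features, and that the package is
    # installed so the src layout resolves to domain_tokenizer.*.
    import torch
    from torch.utils.data import Dataset

    from domain_tokenizer.training.finetune import finetune_domain_model

    class JointFusionDataset(Dataset):
        """Illustrative dataset: each item pairs text tokens with tabular features."""
        def __init__(self, encodings, tabular, labels):
            self.encodings, self.tabular, self.labels = encodings, tabular, labels

        def __len__(self):
            return len(self.labels)

        def __getitem__(self, idx):
            # Keys must match the docstring contract:
            # {input_ids, attention_mask, tabular_features, labels}
            return {
                "input_ids": torch.tensor(self.encodings["input_ids"][idx]),
                "attention_mask": torch.tensor(self.encodings["attention_mask"][idx]),
                "tabular_features": torch.tensor(self.tabular[idx], dtype=torch.float32),
                "labels": torch.tensor(self.labels[idx]),
            }

    trainer = finetune_domain_model(
        model=model,            # JointFusionModel instance, defined elsewhere in the repo
        train_dataset=train_ds, # JointFusionDataset instances as sketched above
        eval_dataset=eval_ds,
        output_dir="./domain_finetune_checkpoints",
        num_epochs=5,
        per_device_batch_size=32,
        bf16=True,              # only if the hardware supports bfloat16
    )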