|
from transformers import TrainingArguments |
|
|
|
# Fine-tuning configuration for DETR (ResNet-50) on the CPPE-5 dataset.
training_args = TrainingArguments(

    output_dir="detr-resnet-50_finetuned_cppe5-second",

    per_device_train_batch_size=8,

    num_train_epochs=30,  # NOTE(review): original comment claimed this matches "num_epochs: 100", but the value is 30 — confirm the intended epoch count

    fp16=False,

    save_steps=200,

    logging_steps=50,

    learning_rate=1e-5,

    weight_decay=1e-4,

    save_total_limit=2,  # keep only the two most recent checkpoints on disk

    remove_unused_columns=False,  # presumably so the data collator keeps the image/annotation columns — standard for detection fine-tuning

    push_to_hub=True,

    seed=42,  # fixed seed for reproducibility

    lr_scheduler_type="linear",

    optim="adamw_torch",  # AdamW optimizer; betas and epsilon are set on the separately built optimizer below

)
|
|
|
# To specify the Adam optimizer's parameters, pass them when constructing the optimizer in the training function
|
# Build the optimizer explicitly so Adam's betas/epsilon can be controlled.
# NOTE: `transformers.AdamW` was deprecated in transformers v4.2 and removed
# in v4.34+ — use the drop-in equivalent `torch.optim.AdamW` instead.
from torch.optim import AdamW

# `model` is expected to be defined earlier in the training script.
optimizer = AdamW(model.parameters(), lr=1e-5, betas=(0.9, 0.999), eps=1e-08)
|
|
|
IoU metric: bbox |
|
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.116 |
|
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.193 |
|
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.125 |
|
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.006 |
|
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.025 |
|
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.115 |
|
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.102 |
|
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.196 |
|
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.239 |
|
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.052 |
|
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.115 |
|
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.227 |