Upload folder using huggingface_hub
- README.md +13 -26
- predictor/assets.json +1 -0
- predictor/config.yaml +142 -0
- predictor/data_processors.pkl +3 -0
- predictor/df_preprocessor.pkl +3 -0
- predictor/eval_metric.pkl +3 -0
- predictor/events.out.tfevents.1758509227.97d04d10c71c.622.2 +3 -0
- predictor/hparams.yaml +24 -0
- predictor/model.ckpt +3 -0
README.md
CHANGED
@@ -1,10 +1,4 @@
-
-from pathlib import Path
-
-DATASET_ID = "cassieli226/2025-24679-image-dataset"
-repo_dir = Path("autogluon_image_artifacts")
-
-yaml_header = """---
+---
 tags:
 - autogluon
 - image
@@ -17,29 +11,22 @@ datasets:
 metrics:
 - accuracy
 - f1
----
+---
 
-body = f"""
 # HW2 — Image AutoML
 
-**
+## Description
+Trained with **AutoGluon** on an augmented image split (train/test) and validated on the original split. Backbone: `timm/resnet18`.
 
-- External (original): accuracy {acc_o:.4f}, F1-weighted {f1w_o:.4f}
+- **Problem Type**: `image-classification`
+- **Preset**: `medium_quality`
 
 ## Data
-- preset: medium_quality
-- epochs: 6
-- batch size: 16
-- lr: 5e-4
-""".lstrip()
+- Train samples: 256
+- Test samples: 64
+- External (original) samples: 40
+- Dataset: `cassieli226/2025-24679-image-dataset`
 
+## Results
+- **Test (accuracy / f1_weighted)**: 1.0000 / 1.0000
+- **Original (accuracy / f1_weighted)**: 1.0000 / 1.0000
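The new card's Results section can be checked against the saved predictor. Below is a minimal sketch, assuming the augmented test split and the original split are available as DataFrames with an image-path column and a `label` column; the CSV file names are placeholders, not files in this repository.

```python
# Sketch: recompute the reported accuracy / f1_weighted metrics.
# "test_split.csv" and "original_split.csv" are hypothetical files with
# columns "image" (path) and "label"; adjust to your actual data layout.
import pandas as pd
from sklearn.metrics import accuracy_score, f1_score
from autogluon.multimodal import MultiModalPredictor

predictor = MultiModalPredictor.load("autogluon_image_artifacts/predictor")

splits = {
    "Test": pd.read_csv("test_split.csv"),
    "Original": pd.read_csv("original_split.csv"),
}
for name, df in splits.items():
    preds = predictor.predict(df.drop(columns=["label"]))
    acc = accuracy_score(df["label"], preds)
    f1w = f1_score(df["label"], preds, average="weighted")
    print(f"{name}: accuracy {acc:.4f} / f1_weighted {f1w:.4f}")
```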
predictor/assets.json
ADDED
@@ -0,0 +1 @@
+{"learner_class": "BaseLearner", "column_types": {"image": "image_path", "label": "categorical"}, "label_column": "label", "problem_type": "binary", "presets": "medium_quality", "eval_metric_name": "accuracy", "validation_metric_name": "accuracy", "minmax_mode": "max", "output_shape": 2, "save_path": "/content/autogluon_image_artifacts/predictor", "pretrained": true, "pretrained_path": null, "fit_called": true, "best_score": null, "total_train_time": null, "version": "1.4.0"}
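assets.json is the learner's metadata manifest (problem type, label column, metric, output shape, AutoGluon version). A quick sketch of inspecting it:

```python
# Sketch: read the learner metadata saved alongside the checkpoint.
import json

with open("predictor/assets.json") as f:
    assets = json.load(f)

print(assets["problem_type"])      # binary
print(assets["label_column"])      # label
print(assets["eval_metric_name"])  # accuracy
print(assets["output_shape"])      # 2 (number of classes)
print(assets["version"])           # 1.4.0
```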
predictor/config.yaml
ADDED
@@ -0,0 +1,142 @@
+model:
+  names:
+  - timm_image
+  timm_image:
+    checkpoint_name: resnet18
+    mix_choice: all_logits
+    data_types:
+    - image
+    train_transforms:
+    - resize_shorter_side
+    - center_crop
+    - trivial_augment
+    val_transforms:
+    - resize_shorter_side
+    - center_crop
+    image_norm: imagenet
+    image_size: null
+    image_chan_num: 3
+    use_learnable_image: false
+    max_image_num_per_column: 1
+data:
+  image:
+    missing_value_strategy: zero
+  text:
+    normalize_text: false
+  categorical:
+    minimum_cat_count: 100
+    maximum_num_cat: 20
+    convert_to_text: false
+    convert_to_text_template: latex
+  numerical:
+    convert_to_text: false
+    scaler_with_mean: true
+    scaler_with_std: true
+  document:
+    missing_value_strategy: zero
+  label:
+    numerical_preprocessing: standardscaler
+  pos_label: null
+  column_features_pooling_mode: concat
+  mixup:
+    turn_on: false
+    mixup_alpha: 0.8
+    cutmix_alpha: 1.0
+    cutmix_minmax: null
+    prob: 1.0
+    switch_prob: 0.5
+    mode: batch
+    turn_off_epoch: 5
+    label_smoothing: 0.1
+  modality_dropout: 0
+  templates:
+    turn_on: false
+    num_templates: 30
+    template_length: 2048
+    preset_templates:
+    - super_glue
+    - rte
+    custom_templates: null
+optim:
+  optim_type: adamw
+  lr: 0.0005
+  weight_decay: 0.001
+  lr_choice: layerwise_decay
+  lr_decay: 0.9
+  lr_schedule: cosine_decay
+  max_epochs: 6
+  max_steps: -1
+  warmup_steps: 0.1
+  end_lr: 0
+  lr_mult: 1
+  patience: 10
+  val_check_interval: 0.5
+  check_val_every_n_epoch: 1
+  skip_final_val: false
+  gradient_clip_val: 1
+  gradient_clip_algorithm: norm
+  track_grad_norm: -1
+  log_every_n_steps: 10
+  label_smoothing: 0
+  top_k: 3
+  top_k_average_method: greedy_soup
+  peft: null
+  lora:
+    module_filter: null
+    filter:
+    - query
+    - value
+    - ^q$
+    - ^v$
+    - ^k$
+    - ^o$
+    r: 8
+    alpha: 8
+    conv_lora_expert_num: 8
+  loss_func: auto
+  focal_loss:
+    alpha: null
+    gamma: 2.0
+    reduction: mean
+  mask2former_loss:
+    loss_cross_entropy_weight: 10.0
+    loss_mask_weight: 5.0
+    loss_dice_weight: 5.0
+  extra_trainable_params: []
+  cross_modal_align: null
+  cross_modal_align_weight: 0
+  automatic_optimization: true
+  lemda:
+    turn_on: false
+    arch_type: mlp_vae
+    z_dim: 8
+    num_layers: 6
+    kld_weight: 0.1
+    mse_weight: 0.1
+    adv_weight: 0.0001
+    consist_weight: 0.01
+    consist_threshold: 0.5
+    lr: 0.0001
+    optim_type: adamw
+    weight_decay: 1.0e-05
+env:
+  num_gpus: 0
+  num_nodes: 1
+  batch_size: 16
+  per_gpu_batch_size: 16
+  inference_batch_size_ratio: 4
+  precision: 32
+  num_workers: 0
+  num_workers_inference: 2
+  accelerator: auto
+  fast_dev_run: false
+  deterministic: false
+  auto_select_gpus: true
+  strategy: auto
+  deepspeed_allgather_size: 1000000000.0
+  deepspeed_allreduce_size: 1000000000.0
+  compile:
+    turn_on: false
+    mode: default
+    dynamic: true
+    backend: inductor
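config.yaml records the resolved training configuration (timm/resnet18 backbone, AdamW at lr 5e-4, 6 epochs, batch size 16, CPU-only). As a rough sketch of how these values would be supplied at fit time, the dotted hyperparameter keys below mirror this file's section/field layout; exact key names can vary across AutoGluon versions, and `train_split.csv` is a placeholder.

```python
# Sketch: retrain with the key settings from config.yaml.
# Dotted keys follow the model/optim/env sections above; verify them
# against your installed AutoGluon version before relying on them.
import pandas as pd
from autogluon.multimodal import MultiModalPredictor

train_df = pd.read_csv("train_split.csv")  # hypothetical: "image" paths + "label"

predictor = MultiModalPredictor(label="label", presets="medium_quality")
predictor.fit(
    train_data=train_df,
    hyperparameters={
        "model.timm_image.checkpoint_name": "resnet18",
        "optim.lr": 5e-4,
        "optim.max_epochs": 6,
        "env.batch_size": 16,
    },
)
```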
predictor/data_processors.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc0cc1c584d582f5e79c7dc2d1ebd08c875cf4e9afbde68fb2987c877b6a021c
+size 1517
predictor/df_preprocessor.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca584735658d10e9df1d456b2189412fd00d25976ffed677beacab565b00bf9b
+size 13407
predictor/eval_metric.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0024ce25367aac2c6595f96f9b18415e8483aa9fb4771dff28ea934a6ce1bca
+size 220
predictor/events.out.tfevents.1758509227.97d04d10c71c.622.2
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b1d5ed3f38833e4cf2dd51622d7cf951c7411ea3f630193cf42e756a49856b31
+size 5391
predictor/hparams.yaml
ADDED
@@ -0,0 +1,24 @@
+optim_type: adamw
+lr_choice: layerwise_decay
+lr_schedule: cosine_decay
+lr: 0.0005
+lr_decay: 0.9
+end_lr: 0
+lr_mult: 1
+weight_decay: 0.001
+warmup_steps: 0.1
+validation_metric_name: accuracy
+peft: null
+mixup_off_epoch: 5
+skip_final_val: false
+track_grad_norm: -1
+cross_modal_align: null
+cross_modal_align_weight: 0
+automatic_optimization: true
+accumulate_grad_batches: 1
+gradient_clip_val: 1
+gradient_clip_algorithm: norm
+use_aug_optim: false
+aug_lr: 0.0001
+aug_weight_decay: 1.0e-05
+aug_optim_type: adamw
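hparams.yaml mirrors the optimizer-level settings the Lightning module was trained with; if needed, it can be read back directly, for example:

```python
# Sketch: inspect the saved optimizer hyperparameters (requires PyYAML).
import yaml

with open("predictor/hparams.yaml") as f:
    hparams = yaml.safe_load(f)

print(hparams["optim_type"], hparams["lr"], hparams["lr_schedule"])
# -> adamw 0.0005 cosine_decay
```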
predictor/model.ckpt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:80633376688caa2e6ac8f2d5b4608f1115bfdc9ff67f87fc7274d10dc11dbd35
+size 44779915
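The .pkl and .ckpt entries above are Git LFS pointer files; the actual binaries resolve when the repository is downloaded. A minimal sketch of pulling the folder and loading the predictor, with the repo id left as a placeholder since it is not stated in this commit:

```python
# Sketch: download the uploaded artifacts and load the predictor.
# Replace the placeholder repo_id with the actual Hub repository.
from huggingface_hub import snapshot_download
from autogluon.multimodal import MultiModalPredictor

local_dir = snapshot_download(repo_id="<user>/<model-repo>")  # placeholder
predictor = MultiModalPredictor.load(f"{local_dir}/predictor")
print(predictor.problem_type)  # expected: binary
```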