Commit b202c40 by Ross Wightman
Parent: 1bf68aa

Add weights and args
Files changed (2):
  1. pytorch_model.bin (+3, -0)
  2. train_args.yaml (+119, -0)
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f653cde0f3870d878f57e490bb7b51d741d6127754d4244c03dd3aa90e6e39f4
+ size 1218173959
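
The block above is only a Git LFS pointer: the actual ~1.2 GB checkpoint (identified by the sha256 oid) is stored out of band and fetched on download. A minimal sketch of pulling and inspecting the real weights, assuming the usual huggingface_hub download path, a plain state_dict payload, and a placeholder repo id (the commit does not name the repo):

import torch
from huggingface_hub import hf_hub_download

# Placeholder repo id; substitute the actual timm model repo this commit belongs to.
ckpt_path = hf_hub_download("timm/<model-repo>", "pytorch_model.bin")

# timm checkpoints pushed to the Hub are typically a flat state_dict of tensors.
state_dict = torch.load(ckpt_path, map_location="cpu")
n_params = sum(t.numel() for t in state_dict.values())
print(f"{len(state_dict)} tensors, ~{n_params / 1e6:.0f}M parameters")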
train_args.yaml ADDED
@@ -0,0 +1,119 @@
+ aa: rand-m10-inc1-mstd101
+ amp: true
+ aot_autograd: false
+ apex_amp: false
+ aug_repeats: 0
+ aug_splits: 0
+ batch_size: 22
+ bce_loss: false
+ bce_target_thresh: null
+ bn_eps: null
+ bn_momentum: null
+ channels_last: false
+ checkpoint_hist: 10
+ class_map: ''
+ clip_grad: 3.0
+ clip_mode: norm
+ color_jitter: 0.4
+ cooldown_epochs: 10
+ crop_pct: 1.0
+ cutmix: 0.0
+ cutmix_minmax: null
+ data_dir: /data/tfds
+ dataset: tfds/imagenet2012
+ dataset_download: false
+ decay_epochs: 100
+ decay_milestones:
+ - 30
+ - 60
+ decay_rate: 0.1
+ dist_bn: reduce
+ drop: 0.0
+ drop_block: null
+ drop_connect: null
+ drop_path: 0.1
+ epoch_repeats: 0.0
+ epochs: 60
+ eval_metric: top1
+ experiment: ''
+ fast_norm: false
+ fuser: ''
+ gp: null
+ grad_checkpointing: false
+ hflip: 0.5
+ img_size: 336
+ in_chans: null
+ initial_checkpoint: ''
+ input_size: null
+ interpolation: ''
+ jsd_loss: false
+ layer_decay: 0.8
+ local_rank: 0
+ log_interval: 50
+ log_wandb: false
+ lr: 7.0e-05
+ lr_cycle_decay: 0.5
+ lr_cycle_limit: 1
+ lr_cycle_mul: 1.0
+ lr_k_decay: 1.0
+ lr_noise: null
+ lr_noise_pct: 0.67
+ lr_noise_std: 1.0
+ mean: null
+ min_lr: 5.0e-07
+ mixup: 0.0
+ mixup_mode: batch
+ mixup_off_epoch: 0
+ mixup_prob: 1.0
+ mixup_switch_prob: 0.5
+ model: vit_large_patch14_clip_224.laion2b_ft_in12k
+ model_ema: true
+ model_ema_decay: 0.9997
+ model_ema_force_cpu: false
+ momentum: 0.9
+ native_amp: false
+ no_aug: false
+ no_ddp_bb: false
+ no_prefetcher: false
+ no_resume_opt: false
+ num_classes: 1000
+ opt: adamw
+ opt_betas: null
+ opt_eps: null
+ output: ''
+ patience_epochs: 10
+ pin_mem: false
+ pretrained: true
+ ratio:
+ - 0.75
+ - 1.3333333333333333
+ recount: 1
+ recovery_interval: 0
+ remode: pixel
+ reprob: 0.3
+ resplit: false
+ resume: ''
+ save_images: false
+ scale:
+ - 0.08
+ - 1.0
+ sched: cosine
+ seed: 42
+ smoothing: 0.1
+ split_bn: false
+ start_epoch: null
+ std: null
+ sync_bn: false
+ torchscript: false
+ train_interpolation: random
+ train_split: train
+ tta: 0
+ use_multi_epochs_loader: false
+ val_split: validation
+ validation_batch_size: null
+ vflip: 0.0
+ warmup_epochs: 10
+ warmup_lr: 1.0e-06
+ weight_decay: 0.01
+ worker_seeding: all
+ workers: 6
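
The values above appear to be the argparse settings saved by timm's train.py for this fine-tuning run: the in12k-fine-tuned CLIP ViT-L/14 named in `model`, retrained at 336 px for 60 epochs with AdamW at lr 7e-5 and layer decay 0.8. A hedged sketch of rebuilding the model this config describes; the local file path is illustrative and the kwargs are standard timm.create_model arguments rather than anything the commit itself shows:

import yaml
import timm

# Illustrative path to the file added in this commit.
with open("train_args.yaml") as f:
    args = yaml.safe_load(f)

# Recreate the fine-tuned architecture roughly as configured: the in12k
# CLIP ViT-L/14 named in `model`, resized to 336 px, with a 1000-class
# head and 0.1 stochastic depth.
model = timm.create_model(
    args["model"],                     # vit_large_patch14_clip_224.laion2b_ft_in12k
    pretrained=args["pretrained"],     # true
    num_classes=args["num_classes"],   # 1000
    img_size=args["img_size"],         # 336
    drop_path_rate=args["drop_path"],  # 0.1
)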