Image Classification · timm · PyTorch · Safetensors
Ross Wightman committed
Commit d9eff88
1 Parent(s): e41953e

Add weights and args

Files changed (2)
  1. pytorch_model.bin +3 -0
  2. train_args.yaml +118 -0
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a49952b92d01615a3d4cc0ccedd6c715e6b824e9f9dc92c44a6379e350f7b48b
+ size 1216860269
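
The three lines above are a Git LFS pointer: the ~1.2 GB weight file itself lives in LFS storage and replaces the pointer when the repository is downloaded. Below is a minimal sketch of loading the resolved pytorch_model.bin into the matching timm architecture; the model name and num_classes are taken from train_args.yaml further down, and the assumption that the file holds a plain PyTorch state dict is mine, not something stated in this commit.

```python
import timm
import torch

# Architecture named in train_args.yaml, with the fine-tuned 1000-class head.
model = timm.create_model(
    "vit_large_patch14_clip_224.laion2b_ft_in12k",
    pretrained=False,
    num_classes=1000,
)

# Assumption: pytorch_model.bin (resolved from the LFS pointer above) contains a
# plain state dict; adjust the loading if the checkpoint is wrapped differently.
state_dict = torch.load("pytorch_model.bin", map_location="cpu")
model.load_state_dict(state_dict)
model.eval()
```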
train_args.yaml ADDED
@@ -0,0 +1,118 @@
+ aa: rand-m9-n3-inc1-mstd101
+ amp: true
+ aot_autograd: false
+ apex_amp: false
+ aug_repeats: 0
+ aug_splits: 0
+ batch_size: 64
+ bce_loss: false
+ bce_target_thresh: null
+ bn_eps: null
+ bn_momentum: null
+ channels_last: false
+ checkpoint_hist: 10
+ class_map: ''
+ clip_grad: 3.0
+ clip_mode: norm
+ color_jitter: 0.4
+ cooldown_epochs: 10
+ crop_pct: null
+ cutmix: 0.0
+ cutmix_minmax: null
+ data_dir: /data/tfds
+ dataset: tfds/imagenet2012
+ dataset_download: false
+ decay_epochs: 100
+ decay_milestones:
+ - 30
+ - 60
+ decay_rate: 0.1
+ dist_bn: reduce
+ drop: 0.0
+ drop_block: null
+ drop_connect: null
+ drop_path: 0.1
+ epoch_repeats: 0.0
+ epochs: 50
+ eval_metric: top1
+ experiment: ''
+ fast_norm: false
+ fuser: ''
+ gp: null
+ grad_checkpointing: false
+ hflip: 0.5
+ img_size: null
+ initial_checkpoint: ''
+ input_size: null
+ interpolation: ''
+ jsd_loss: false
+ layer_decay: 0.8
+ local_rank: 0
+ log_interval: 50
+ log_wandb: false
+ lr: 6.0e-05
+ lr_cycle_decay: 0.5
+ lr_cycle_limit: 1
+ lr_cycle_mul: 1.0
+ lr_k_decay: 1.0
+ lr_noise: null
+ lr_noise_pct: 0.67
+ lr_noise_std: 1.0
+ mean: null
+ min_lr: 5.0e-07
+ mixup: 0.0
+ mixup_mode: batch
+ mixup_off_epoch: 0
+ mixup_prob: 1.0
+ mixup_switch_prob: 0.5
+ model: vit_large_patch14_clip_224.laion2b_ft_in12k
+ model_ema: true
+ model_ema_decay: 0.9997
+ model_ema_force_cpu: false
+ momentum: 0.9
+ native_amp: false
+ no_aug: false
+ no_ddp_bb: false
+ no_prefetcher: false
+ no_resume_opt: false
+ num_classes: 1000
+ opt: adamw
+ opt_betas: null
+ opt_eps: null
+ output: ''
+ patience_epochs: 10
+ pin_mem: false
+ pretrained: true
+ ratio:
+ - 0.75
+ - 1.3333333333333333
+ recount: 1
+ recovery_interval: 0
+ remode: pixel
+ reprob: 0.3
+ resplit: false
+ resume: ''
+ save_images: false
+ scale:
+ - 0.08
+ - 1.0
+ sched: cosine
+ seed: 42
+ smoothing: 0.1
+ split_bn: false
+ start_epoch: null
+ std: null
+ sync_bn: false
+ torchscript: false
+ train_interpolation: random
+ train_split: train
+ tta: 0
+ use_multi_epochs_loader: false
+ val_split: validation
+ validation_batch_size: null
+ vflip: 0.0
+ warmup_epochs: 10
+ warmup_lr: 1.0e-06
+ weight_decay: 0.01
+ worker_seeding: all
+ workers: 6
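
train_args.yaml records the arguments of the fine-tuning run: the CLIP ViT-L/14 checkpoint pretrained on LAION-2B and fine-tuned through ImageNet-12k is trained here on ImageNet-1k (1000 classes) with AdamW, lr 6e-5, weight decay 0.01, a cosine schedule over 50 epochs, and drop-path 0.1. Below is a minimal sketch of reading the file back and recreating the model and optimizer with the recorded hyperparameters; the file path and the use of create_optimizer_v2 are assumptions for illustration, not part of this commit.

```python
import yaml
import timm
from timm.optim import create_optimizer_v2

# Assumed local path to the file added in this commit.
with open("train_args.yaml") as f:
    args = yaml.safe_load(f)

# Recreate the architecture with the recorded regularisation settings.
model = timm.create_model(
    args["model"],                     # vit_large_patch14_clip_224.laion2b_ft_in12k
    pretrained=args["pretrained"],     # start from the LAION-2B / in12k weights
    num_classes=args["num_classes"],   # 1000 (ImageNet-1k head)
    drop_path_rate=args["drop_path"],  # 0.1 stochastic depth
)

# AdamW at lr=6e-5 with weight_decay=0.01, matching opt/lr/weight_decay above.
optimizer = create_optimizer_v2(
    model,
    opt=args["opt"],
    lr=args["lr"],
    weight_decay=args["weight_decay"],
)
```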