Felixvi committed
Commit f6ac792
Parent: 18312f6

Training in progress, epoch 0

config.json CHANGED
@@ -1,21 +1,23 @@
 {
-  "_name_or_path": "microsoft/swinv2-tiny-patch4-window8-256",
+  "_name_or_path": "microsoft/resnet-50",
   "architectures": [
-    "Swinv2ForImageClassification"
+    "ResNetForImageClassification"
   ],
-  "attention_probs_dropout_prob": 0.0,
   "depths": [
-    2,
-    2,
+    3,
+    4,
     6,
-    2
+    3
+  ],
+  "downsample_in_first_stage": false,
+  "embedding_size": 64,
+  "hidden_act": "relu",
+  "hidden_sizes": [
+    256,
+    512,
+    1024,
+    2048
   ],
-  "drop_path_rate": 0.1,
-  "embed_dim": 96,
-  "encoder_stride": 32,
-  "hidden_act": "gelu",
-  "hidden_dropout_prob": 0.0,
-  "hidden_size": 768,
   "id2label": {
     "0": "\u54ed",
     "1": "\u5750",
@@ -36,8 +38,6 @@
     "8": "\u6708\u4eae",
     "9": "\u72d7"
   },
-  "image_size": 256,
-  "initializer_range": 0.02,
   "label2id": {
     "\u54ed": "0",
     "\u5750": "1",
@@ -58,29 +58,23 @@
     "\u81ea\u884c\u8f66": "16",
     "\u829d\u9ebb": "17"
   },
-  "layer_norm_eps": 1e-05,
-  "mlp_ratio": 4.0,
-  "model_type": "swinv2",
+  "layer_type": "bottleneck",
+  "model_type": "resnet",
   "num_channels": 3,
-  "num_heads": [
-    3,
-    6,
-    12,
-    24
+  "out_features": [
+    "stage4"
   ],
-  "num_layers": 4,
-  "patch_size": 4,
-  "path_norm": true,
-  "pretrained_window_sizes": [
-    0,
-    0,
-    0,
-    0
+  "out_indices": [
+    4
   ],
   "problem_type": "single_label_classification",
-  "qkv_bias": true,
+  "stage_names": [
+    "stem",
+    "stage1",
+    "stage2",
+    "stage3",
+    "stage4"
+  ],
   "torch_dtype": "float32",
-  "transformers_version": "4.28.1",
-  "use_absolute_embeddings": false,
-  "window_size": 8
+  "transformers_version": "4.28.1"
 }
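
This commit replaces the SwinV2-Tiny backbone (microsoft/swinv2-tiny-patch4-window8-256) with ResNet-50 (microsoft/resnet-50) while keeping the same 18-entry id2label/label2id maps. The sketch below, which assumes the new config.json has been saved to the working directory, shows how the updated file resolves through the Auto classes; the local path is illustrative, not part of this repo.

```python
# Minimal sketch, assuming the updated config.json has been downloaded into
# the current directory; the path is illustrative.
from transformers import AutoConfig, AutoModelForImageClassification

config = AutoConfig.from_pretrained(".")  # directory containing config.json
print(config.model_type)   # "resnet" after this commit (was "swinv2")
print(config.num_labels)   # 18 -- the id2label/label2id maps are unchanged
print(config.id2label[9])  # '狗'

# Instantiates a randomly initialised ResNet-50 with an 18-class head matching
# the config; loading the repo itself would also pull the fine-tuned weights
# from pytorch_model.bin.
model = AutoModelForImageClassification.from_config(config)
```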
preprocessor_config.json CHANGED
@@ -1,4 +1,5 @@
 {
+  "crop_pct": 0.875,
   "do_normalize": true,
   "do_rescale": true,
   "do_resize": true,
@@ -7,7 +8,7 @@
     0.456,
     0.406
   ],
-  "image_processor_type": "ViTImageProcessor",
+  "image_processor_type": "ConvNextImageProcessor",
   "image_std": [
     0.229,
     0.224,
@@ -16,7 +17,6 @@
   "resample": 3,
   "rescale_factor": 0.00392156862745098,
   "size": {
-    "height": 256,
-    "width": 256
+    "shortest_edge": 224
   }
 }
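
The image processor changes from ViTImageProcessor with a fixed 256x256 resize to ConvNextImageProcessor with crop_pct 0.875 and a 224-pixel shortest edge. A minimal sketch of the resulting preprocessing, assuming the new preprocessor_config.json is in the working directory and using a dummy image for illustration:

```python
# Minimal sketch of the new preprocessing, assuming the updated
# preprocessor_config.json sits in the current directory.
import numpy as np
from transformers import AutoImageProcessor

image_processor = AutoImageProcessor.from_pretrained(".")
print(type(image_processor).__name__)  # ConvNextImageProcessor (was ViTImageProcessor)

# With size={"shortest_edge": 224} and crop_pct=0.875, ConvNextImageProcessor
# resizes the shorter side to 224 / 0.875 = 256 and then center-crops to
# 224x224, replacing the fixed 256x256 resize of the old ViT config.
dummy = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # fake HxWxC image
inputs = image_processor(images=dummy, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```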
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fa07eac0596c767e67d8d08eb4e5af2ae515141b973a72b99fbabc6811e2ff5a
-size 110454369
+oid sha256:b4746e51d3dca39f74a785efbc0bf0afa6cb739404a9b31958921638aaa83970
+size 94506125
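
pytorch_model.bin is tracked with Git LFS, so the repository only stores a pointer; this commit updates its oid and size (the ResNet-50 weights are smaller than the previous SwinV2-Tiny file). A minimal sketch, assuming the weights have been downloaded locally, of checking the file against the new pointer:

```python
# Minimal sketch: verify a locally downloaded pytorch_model.bin against the
# new Git LFS pointer (oid and size from this commit).
import hashlib
import os

EXPECTED_OID = "b4746e51d3dca39f74a785efbc0bf0afa6cb739404a9b31958921638aaa83970"
EXPECTED_SIZE = 94506125  # bytes

path = "pytorch_model.bin"  # assumed local path to the downloaded weights
digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert os.path.getsize(path) == EXPECTED_SIZE
assert digest.hexdigest() == EXPECTED_OID
print("pytorch_model.bin matches the LFS pointer")
```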
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a66eefcbd71bec5aec9d17e31e6da58c63b88728cf97b70418760c53fcae3e00
+oid sha256:cd7b11f82b436a2a0b7470a34996bb22e443106af25a560af8f8a254fa9434de
 size 3579