Training in progress, epoch 0
- config.json +51 -0
- preprocessor_config.json +16 -0
- pytorch_model.bin +3 -0
- training_args.bin +3 -0
config.json
ADDED
@@ -0,0 +1,51 @@
+{
+  "_name_or_path": "apple/mobilevitv2-1.0-imagenet1k-256",
+  "architectures": [
+    "MobileViTV2ForImageClassification"
+  ],
+  "aspp_dropout_prob": 0.1,
+  "aspp_out_channels": 512,
+  "atrous_rates": [
+    6,
+    12,
+    18
+  ],
+  "attn_dropout": 0.0,
+  "base_attn_unit_dims": [
+    128,
+    192,
+    256
+  ],
+  "classifier_dropout_prob": 0.1,
+  "conv_kernel_size": 3,
+  "expand_ratio": 2.0,
+  "ffn_dropout": 0.0,
+  "ffn_multiplier": 2,
+  "hidden_act": "swish",
+  "id2label": {
+    "0": "bad",
+    "1": "good"
+  },
+  "image_size": 256,
+  "initializer_range": 0.02,
+  "label2id": {
+    "bad": 0,
+    "good": 1
+  },
+  "layer_norm_eps": 1e-05,
+  "mlp_ratio": 2.0,
+  "model_type": "mobilevitv2",
+  "n_attn_blocks": [
+    2,
+    4,
+    3
+  ],
+  "num_channels": 3,
+  "output_stride": 32,
+  "patch_size": 2,
+  "problem_type": "single_label_classification",
+  "semantic_loss_ignore_index": 255,
+  "torch_dtype": "float32",
+  "transformers_version": "4.34.1",
+  "width_multiplier": 1.0
+}
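This config fine-tunes apple/mobilevitv2-1.0-imagenet1k-256 as a binary good/bad image classifier (single_label_classification, two labels). A minimal sketch of loading the resulting checkpoint with Transformers; the repo id below is a placeholder, not this repository's actual name:

    # Minimal sketch: load the checkpoint this commit is training.
    # "your-username/your-model" is a placeholder repo id.
    from transformers import AutoModelForImageClassification

    model = AutoModelForImageClassification.from_pretrained("your-username/your-model")
    print(model.config.id2label)  # {0: 'bad', 1: 'good'}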
preprocessor_config.json
ADDED
@@ -0,0 +1,16 @@
+{
+  "crop_size": {
+    "height": 256,
+    "width": 256
+  },
+  "do_center_crop": true,
+  "do_flip_channel_order": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_processor_type": "MobileViTImageProcessor",
+  "resample": 2,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "shortest_edge": 288
+  }
+}
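This preprocessor resizes the shortest edge to 288 px (resample 2 = bilinear), center-crops to 256x256 to match the model's image_size, rescales pixel values by 1/255 (0.00392...), and flips the channel order from RGB to BGR, as MobileViT checkpoints expect. A minimal usage sketch, with placeholder repo id and image path:

    from PIL import Image
    from transformers import MobileViTImageProcessor

    # Placeholder repo id and image path, for illustration only.
    processor = MobileViTImageProcessor.from_pretrained("your-username/your-model")
    image = Image.open("example.jpg")
    inputs = processor(images=image, return_tensors="pt")
    print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 256, 256])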
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c16ba59402444d6a0b4267e118aaea328611bc00276e5cfaca67465bed2bda4e
+size 17720574
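The weights themselves live in Git LFS; this three-line pointer records only the object's SHA-256 digest and size (17,720,574 bytes, roughly 4.4M float32 parameters). A sketch of checking a downloaded copy against the pointer's digest; the local path is a placeholder:

    import hashlib

    # Placeholder local path to the downloaded weights file.
    with open("pytorch_model.bin", "rb") as f:
        digest = hashlib.sha256(f.read()).hexdigest()
    assert digest == "c16ba59402444d6a0b4267e118aaea328611bc00276e5cfaca67465bed2bda4e"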
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8fd8b5613855644900845b6cda66832582e7e43671ddd06ce85043c100327830
+size 4600
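training_args.bin is a pickled TrainingArguments object saved by the Trainer. A sketch of inspecting it, assuming a compatible transformers install (4.34.1 here); the local path is a placeholder:

    import torch

    # weights_only=False because this is a pickled TrainingArguments object,
    # not a bare tensor state dict. Placeholder local path.
    args = torch.load("training_args.bin", weights_only=False)
    print(args.num_train_epochs, args.learning_rate, args.per_device_train_batch_size)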