End of training
- README.md +63 -0
- config.json +59 -0
- model.safetensors +3 -0
- preprocessor_config.json +28 -0
- runs/Dec12_13-41-54_cf416507e750/events.out.tfevents.1702388515.cf416507e750.6145.3 +3 -0
- training_args.bin +3 -0
README.md
ADDED
@@ -0,0 +1,63 @@
---
license: apache-2.0
base_model: facebook/dinov2-small-imagenet1k-1-layer
tags:
- generated_from_trainer
metrics:
- accuracy
model-index:
- name: sample_foot
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# sample_foot

This model is a fine-tuned version of [facebook/dinov2-small-imagenet1k-1-layer](https://huggingface.co/facebook/dinov2-small-imagenet1k-1-layer) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 3.2852
- Accuracy: 0.4545

## Model description

More information needed

## Intended uses & limitations

More information needed

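A minimal inference sketch is shown below. The local checkpoint path `./sample_foot` and the example image filename are placeholders, not part of this repository; the labels come from this repo's `config.json` (Flat Arch, High Arch, Normal Arch).

```python
# Minimal inference sketch; "./sample_foot" and "foot_image.jpg" are placeholders.
from transformers import pipeline

classifier = pipeline("image-classification", model="./sample_foot")
preds = classifier("foot_image.jpg")
print(preds)  # e.g. [{"label": "Normal Arch", "score": ...}, ...]
```
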
## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training (a `TrainingArguments` sketch follows the list):
- learning_rate: 5e-05
- train_batch_size: 1
- eval_batch_size: 1
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 3

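These values map roughly onto a Transformers `TrainingArguments` object as in the sketch below. The output directory and the per-epoch evaluation strategy are assumptions, the Trainer's default optimizer is AdamW (with the betas and epsilon listed above) rather than plain Adam, and the dataset and `Trainer` wiring are omitted.

```python
# Hedged sketch of the hyperparameters above; output_dir and evaluation_strategy are assumptions.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="sample_foot",          # placeholder
    learning_rate=5e-5,
    per_device_train_batch_size=1,
    per_device_eval_batch_size=1,
    seed=42,
    lr_scheduler_type="linear",
    warmup_ratio=0.1,
    num_train_epochs=3,
    evaluation_strategy="epoch",       # assumed from the per-epoch results table below
)
```
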
### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 5.324         | 1.0   | 21   | 1.6904          | 0.4545   |
| 2.7691        | 2.0   | 42   | 3.3614          | 0.4545   |
| 3.0057        | 3.0   | 63   | 3.2852          | 0.4545   |


### Framework versions

- Transformers 4.35.2
- Pytorch 2.1.0+cu118
- Datasets 2.15.0
- Tokenizers 0.15.0
config.json
ADDED
@@ -0,0 +1,59 @@
{
  "_name_or_path": "facebook/dinov2-small-imagenet1k-1-layer",
  "apply_layernorm": true,
  "architectures": [
    "Dinov2ForImageClassification"
  ],
  "attention_probs_dropout_prob": 0.0,
  "drop_path_rate": 0.0,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.0,
  "hidden_size": 384,
  "id2label": {
    "0": "Flat Arch",
    "1": "High Arch",
    "2": "Normal Arch"
  },
  "image_size": 518,
  "initializer_range": 0.02,
  "label2id": {
    "Flat Arch": "0",
    "High Arch": "1",
    "Normal Arch": "2"
  },
  "layer_norm_eps": 1e-06,
  "layerscale_value": 1.0,
  "mlp_ratio": 4,
  "model_type": "dinov2",
  "num_attention_heads": 6,
  "num_channels": 3,
  "num_hidden_layers": 12,
  "out_features": [
    "stage12"
  ],
  "out_indices": [
    12
  ],
  "patch_size": 14,
  "problem_type": "single_label_classification",
  "qkv_bias": true,
  "reshape_hidden_states": true,
  "stage_names": [
    "stem",
    "stage1",
    "stage2",
    "stage3",
    "stage4",
    "stage5",
    "stage6",
    "stage7",
    "stage8",
    "stage9",
    "stage10",
    "stage11",
    "stage12"
  ],
  "torch_dtype": "float32",
  "transformers_version": "4.35.2",
  "use_swiglu_ffn": false
}
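As an illustration only, the label mapping defined in this config can be inspected with `AutoConfig`; the local path is a placeholder for wherever this repository is checked out.

```python
# Illustrative sketch: load the config above and print the label mapping.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("./sample_foot")  # placeholder path
print(config.model_type)  # "dinov2"
print(config.id2label)    # {0: "Flat Arch", 1: "High Arch", 2: "Normal Arch"}
```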
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b12adbfd6f9fc1637912e62a4d84ffe690062c3f9343e9b7e235dff289c6d1ec
size 88260908
preprocessor_config.json
ADDED
@@ -0,0 +1,28 @@
{
  "crop_size": {
    "height": 224,
    "width": 224
  },
  "do_center_crop": true,
  "do_convert_rgb": true,
  "do_normalize": true,
  "do_rescale": true,
  "do_resize": true,
  "image_mean": [
    0.485,
    0.456,
    0.406
  ],
  "image_processor_type": "BitImageProcessor",
  "image_std": [
    0.229,
    0.224,
    0.225
  ],
  "resample": 3,
  "rescale_factor": 0.00392156862745098,
  "size": {
    "shortest_edge": 256
  },
  "use_square_size": false
}
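For illustration, the preprocessing defined above (resize the shortest edge to 256, center-crop to 224x224, rescale by 1/255, and normalize with ImageNet mean/std) can be applied through `AutoImageProcessor`; the checkpoint path and image filename are placeholders.

```python
# Sketch of applying this repo's preprocessor; paths are placeholders.
from PIL import Image
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained("./sample_foot")  # resolves to BitImageProcessor
inputs = processor(images=Image.open("foot_image.jpg"), return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```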
runs/Dec12_13-41-54_cf416507e750/events.out.tfevents.1702388515.cf416507e750.6145.3
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f0d02d976ba3a353ab594ed2c29c162d652d7cd4e577e1f711da09936ffabf74
size 6960
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:77480b03b7186ec53961386eb40a117aec07b419ab118c11f86609df11b16369
size 4600