pszemraj committed on
Commit
160c7d9
1 Parent(s): 064f130

Model save

Files changed (5)
  1. README.md +75 -0
  2. config.json +58 -0
  3. model.safetensors +3 -0
  4. preprocessor_config.json +44 -0
  5. training_args.bin +3 -0
README.md ADDED
@@ -0,0 +1,75 @@
+ ---
+ license: apache-2.0
+ base_model: facebook/dinov2-base-imagenet1k-1-layer
+ tags:
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ - f1
+ - precision
+ - recall
+ - matthews_correlation
+ model-index:
+ - name: dinov2-base-imagenet1k-1-layer-boulderspot-vN
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # dinov2-base-imagenet1k-1-layer-boulderspot-vN
+
+ This model is a fine-tuned version of [facebook/dinov2-base-imagenet1k-1-layer](https://huggingface.co/facebook/dinov2-base-imagenet1k-1-layer) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0519
+ - Accuracy: 0.9810
+ - F1: 0.9809
+ - Precision: 0.9808
+ - Recall: 0.9810
+ - Matthews Correlation: 0.8501
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 16
+ - eval_batch_size: 16
+ - seed: 7395
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 64
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.05
+ - num_epochs: 5.0
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1     | Precision | Recall | Matthews Correlation |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:---------:|:------:|:--------------------:|
+ | 0.1596        | 1.0   | 203  | 0.0733          | 0.9766   | 0.9759 | 0.9757    | 0.9766 | 0.8079               |
+ | 0.0635        | 2.0   | 406  | 0.1276          | 0.9474   | 0.9522 | 0.9619    | 0.9474 | 0.6845               |
+ | 0.1031        | 3.0   | 609  | 0.0602          | 0.9751   | 0.9755 | 0.9760    | 0.9751 | 0.8118               |
+ | 0.0587        | 4.0   | 813  | 0.0512          | 0.9737   | 0.9734 | 0.9732    | 0.9737 | 0.7905               |
+ | 0.038         | 4.99  | 1015 | 0.0519          | 0.9810   | 0.9809 | 0.9808    | 0.9810 | 0.8501               |
+
+
+ ### Framework versions
+
+ - Transformers 4.39.2
+ - Pytorch 2.4.0.dev20240328+cu121
+ - Datasets 2.18.0
+ - Tokenizers 0.15.2
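
For reference, a minimal inference sketch in Python. The Hub repo id below is an assumption inferred from the model-index name (the actual published path may differ), and the input image path is a hypothetical example:

```python
from transformers import pipeline

# Hypothetical repo id, inferred from the model-index name above;
# substitute the actual Hub path where this checkpoint is published.
classifier = pipeline(
    "image-classification",
    model="pszemraj/dinov2-base-imagenet1k-1-layer-boulderspot",
)

# Accepts a local path, PIL image, or URL; returns label/score pairs
# for the two classes defined in config.json.
preds = classifier("aerial_tile.png")  # hypothetical example image
print(preds)
```

The pipeline applies the preprocessing defined in preprocessor_config.json below and scores the two labels ("bouldering_area", "other") defined in config.json.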
config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "_name_or_path": "facebook/dinov2-base-imagenet1k-1-layer",
+   "apply_layernorm": true,
+   "architectures": [
+     "Dinov2ForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "drop_path_rate": 0.0,
+   "finetuning_task": "image-classification",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "bouldering_area",
+     "1": "other"
+   },
+   "image_size": 518,
+   "initializer_range": 0.02,
+   "label2id": {
+     "bouldering_area": "0",
+     "other": "1"
+   },
+   "layer_norm_eps": 1e-06,
+   "layerscale_value": 1.0,
+   "mlp_ratio": 4,
+   "model_type": "dinov2",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "out_features": [
+     "stage12"
+   ],
+   "out_indices": [
+     12
+   ],
+   "patch_size": 14,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "reshape_hidden_states": true,
+   "stage_names": [
+     "stem",
+     "stage1",
+     "stage2",
+     "stage3",
+     "stage4",
+     "stage5",
+     "stage6",
+     "stage7",
+     "stage8",
+     "stage9",
+     "stage10",
+     "stage11",
+     "stage12"
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.39.2",
+   "use_swiglu_ffn": false
+ }
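
This config wires a DINOv2-base backbone (12 layers, hidden size 768, patch size 14) to a two-way classification head. A short sketch of inspecting it with transformers, assuming the files from this commit are checked out in the current directory:

```python
from transformers import AutoConfig, AutoModelForImageClassification

# Reads config.json from the current directory (local checkout assumed).
config = AutoConfig.from_pretrained(".")
print(config.id2label)  # {0: 'bouldering_area', 1: 'other'}

# Loads model.safetensors; the head is a Linear layer over the CLS token
# concatenated with the mean-pooled patch tokens (in_features = 2 * 768).
model = AutoModelForImageClassification.from_pretrained(".")
print(model.classifier)
```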
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02732ec14eebfccd75a12603901f1f75ffe44421ac607f8bc0ca0e6521b518bf
+ size 346359928
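
The weights file can also be inspected directly without instantiating the model; a sketch assuming the LFS blob has been pulled and `safetensors` is installed:

```python
from safetensors.torch import load_file

# Loads the raw state dict (~346 MB of float32 weights, per the size above).
state_dict = load_file("model.safetensors")
print(len(state_dict), "tensors")
print(state_dict["classifier.weight"].shape)  # expected: torch.Size([2, 1536])
```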
preprocessor_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "_valid_processor_keys": [
+     "images",
+     "do_resize",
+     "size",
+     "resample",
+     "do_center_crop",
+     "crop_size",
+     "do_rescale",
+     "rescale_factor",
+     "do_normalize",
+     "image_mean",
+     "image_std",
+     "do_convert_rgb",
+     "return_tensors",
+     "data_format",
+     "input_data_format"
+   ],
+   "crop_size": {
+     "height": 224,
+     "width": 224
+   },
+   "do_center_crop": true,
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.485,
+     0.456,
+     0.406
+   ],
+   "image_processor_type": "BitImageProcessor",
+   "image_std": [
+     0.229,
+     0.224,
+     0.225
+   ],
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "shortest_edge": 256
+   }
+ }
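
At inference time this config resizes the shortest edge to 256 (resample 3 is bicubic), center-crops to 224x224, rescales by 1/255 (0.00392...), and normalizes with the ImageNet mean/std listed above. A sketch, again assuming a local checkout and a hypothetical example image:

```python
from PIL import Image
from transformers import AutoImageProcessor

# Reads preprocessor_config.json from the current directory.
processor = AutoImageProcessor.from_pretrained(".")
image = Image.open("aerial_tile.png")  # hypothetical input

inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```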
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8963a11d90a7990472b4b49b1a0b4d17650c27563cd431becef88fda91651637
+ size 5112
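
training_args.bin is a pickled TrainingArguments object saved by the Trainer. A sketch of recovering the hyperparameters listed in the README from it; note that on newer PyTorch versions `weights_only=False` must be passed explicitly, since the file is a pickle rather than a tensor checkpoint:

```python
import torch

# transformers must be importable for unpickling to succeed.
training_args = torch.load("training_args.bin", weights_only=False)
print(training_args.learning_rate)      # 2e-05
print(training_args.num_train_epochs)   # 5.0
```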