St4r4x-NV committed
Commit a520d02 · verified · 1 Parent(s): 2cc29dc

End of training

README.md ADDED
@@ -0,0 +1,70 @@
+ ---
+ license: cc-by-nc-4.0
+ base_model: MCG-NJU/videomae-base
+ tags:
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ model-index:
+ - name: videomae-base-finetuned-Cheh2_ucf_light_demo-10epochs
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # videomae-base-finetuned-Cheh2_ucf_light_demo-10epochs
+
+ This model is a fine-tuned version of [MCG-NJU/videomae-base](https://huggingface.co/MCG-NJU/videomae-base) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 1.6448
+ - Accuracy: 0.5270
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.1
+ - training_steps: 15010
+
+ ### Training results
+
+ | Training Loss | Epoch  | Step  | Validation Loss | Accuracy |
+ |:-------------:|:------:|:-----:|:---------------:|:--------:|
+ | 2.3441        | 0.1001 | 1502  | 2.3923          | 0.2469   |
+ | 1.6506        | 1.1001 | 3004  | 2.0596          | 0.3513   |
+ | 1.1963        | 2.1001 | 4506  | 2.0148          | 0.4035   |
+ | 1.5381        | 3.1001 | 6008  | 1.8796          | 0.4262   |
+ | 1.5438        | 4.1001 | 7510  | 1.8851          | 0.4398   |
+ | 1.225         | 5.1001 | 9012  | 1.7874          | 0.4649   |
+ | 1.038         | 6.1001 | 10514 | 1.7263          | 0.4852   |
+ | 1.0129        | 7.1001 | 12016 | 1.7192          | 0.5046   |
+ | 1.0309        | 8.1001 | 13518 | 1.6448          | 0.5270   |
+ | 0.91          | 9.0994 | 15010 | 1.6655          | 0.5144   |
+
+
+ ### Framework versions
+
+ - Transformers 4.43.4
+ - Pytorch 2.4.1
+ - Datasets 2.20.0
+ - Tokenizers 0.19.1
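The hyperparameter list in the card above maps directly onto `transformers.TrainingArguments`. A minimal sketch of an equivalent configuration (the `output_dir` value is just the model name from the card; dataset, collator, and `Trainer` wiring are omitted):

```python
from transformers import TrainingArguments

# Sketch of the configuration behind the "Training hyperparameters" list.
# Adam betas (0.9, 0.999), epsilon 1e-08, and the linear schedule are the
# Trainer defaults, so they need no explicit arguments here.
training_args = TrainingArguments(
    output_dir="videomae-base-finetuned-Cheh2_ucf_light_demo-10epochs",
    learning_rate=5e-5,
    per_device_train_batch_size=8,   # train_batch_size: 8
    per_device_eval_batch_size=8,    # eval_batch_size: 8
    seed=42,
    warmup_ratio=0.1,                # lr_scheduler_warmup_ratio: 0.1
    max_steps=15010,                 # training_steps: 15010
)
```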
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 9.099400399733511,
+   "eval_accuracy": 0.5269582909460834,
+   "eval_loss": 1.6447620391845703,
+   "eval_runtime": 947.2239,
+   "eval_samples_per_second": 3.113,
+   "eval_steps_per_second": 0.39
+ }
config.json ADDED
@@ -0,0 +1,67 @@
+ {
+   "_name_or_path": "MCG-NJU/videomae-base",
+   "architectures": [
+     "VideoMAEForVideoClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "decoder_hidden_size": 384,
+   "decoder_intermediate_size": 1536,
+   "decoder_num_attention_heads": 6,
+   "decoder_num_hidden_layers": 4,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "blowing nose",
+     "1": "brushing teeth",
+     "2": "crying",
+     "3": "drinking shots",
+     "4": "eating chips",
+     "5": "laughing",
+     "6": "sipping cup",
+     "7": "sleeping",
+     "8": "smoking",
+     "9": "sneezing",
+     "10": "talking on cell phone",
+     "11": "texting",
+     "12": "throwing tantrum",
+     "13": "using inhaler",
+     "14": "waking up",
+     "15": "watching tv"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "blowing nose": 0,
+     "brushing teeth": 1,
+     "crying": 2,
+     "drinking shots": 3,
+     "eating chips": 4,
+     "laughing": 5,
+     "sipping cup": 6,
+     "sleeping": 7,
+     "smoking": 8,
+     "sneezing": 9,
+     "talking on cell phone": 10,
+     "texting": 11,
+     "throwing tantrum": 12,
+     "using inhaler": 13,
+     "waking up": 14,
+     "watching tv": 15
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "videomae",
+   "norm_pix_loss": true,
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_frames": 16,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.43.4",
+   "tubelet_size": 2,
+   "use_mean_pooling": false
+ }
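With the architecture and label maps above, the checkpoint loads for inference through the standard VideoMAE classes. A sketch, assuming the checkpoint lives at the Hub path `St4r4x-NV/videomae-base-finetuned-Cheh2_ucf_light_demo-10epochs` (a repo id inferred from the committer and model name, not confirmed by the diff):

```python
import numpy as np
import torch
from transformers import VideoMAEImageProcessor, VideoMAEForVideoClassification

ckpt = "St4r4x-NV/videomae-base-finetuned-Cheh2_ucf_light_demo-10epochs"  # assumed repo id
processor = VideoMAEImageProcessor.from_pretrained(ckpt)
model = VideoMAEForVideoClassification.from_pretrained(ckpt)

# The config calls for clips of num_frames=16 RGB frames; a real clip would
# be sampled from a video file, random frames stand in for one here.
video = [np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8) for _ in range(16)]

inputs = processor(video, return_tensors="pt")  # pixel_values: (1, 16, 3, 224, 224)
with torch.no_grad():
    logits = model(**inputs).logits             # (1, 16) over the labels above
print(model.config.id2label[logits.argmax(-1).item()])
```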
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a03cf20ea32e3d08e0cc8dbccae2aee7f6c6e24466d58065a31117e9f3257df
+ size 344980440
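The three lines above are a Git LFS pointer, not the weights themselves; the actual 344,980,440-byte safetensors blob is resolved by LFS at checkout. A sketch of pulling just this file with `huggingface_hub` (same assumed repo id as above):

```python
from huggingface_hub import hf_hub_download

# Resolves the LFS pointer to the real weights file (repo id is an assumption).
path = hf_hub_download(
    repo_id="St4r4x-NV/videomae-base-finetuned-Cheh2_ucf_light_demo-10epochs",
    filename="model.safetensors",
)
```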
preprocessor_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "crop_size": {
+     "height": 224,
+     "width": 224
+   },
+   "do_center_crop": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.485,
+     0.456,
+     0.406
+   ],
+   "image_processor_type": "VideoMAEImageProcessor",
+   "image_std": [
+     0.229,
+     0.224,
+     0.225
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "shortest_edge": 224
+   }
+ }
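Each field in this preprocessor config corresponds to a `VideoMAEImageProcessor` constructor argument. A sketch building the processor by hand from the values above, which should be equivalent to loading it with `from_pretrained`:

```python
from transformers import VideoMAEImageProcessor

# Rebuilding the processor from the config values above: resize the shortest
# edge to 224, center-crop to 224x224, rescale uint8 pixels to [0, 1], then
# normalize with ImageNet mean/std.
processor = VideoMAEImageProcessor(
    do_resize=True,
    size={"shortest_edge": 224},
    resample=2,  # PIL bilinear
    do_center_crop=True,
    crop_size={"height": 224, "width": 224},
    do_rescale=True,
    rescale_factor=1 / 255,  # 0.00392156862745098
    do_normalize=True,
    image_mean=[0.485, 0.456, 0.406],
    image_std=[0.229, 0.224, 0.225],
)
```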
runs/Jan13_13-37-41_aatrox-B560M-SILVER/events.out.tfevents.1736771890.aatrox-B560M-SILVER.8395.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c32c5c55ac2ad3c7ec806e2a787ad4d49be52c7bb1dc2962a1204da689fd9a9
+ size 5252
runs/Jan13_13-41-35_aatrox-B560M-SILVER/events.out.tfevents.1736772097.aatrox-B560M-SILVER.9634.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d6d8a7760d22eb19abb68e057b1edfafbc3b49605f1ced236b1adb56f0cac7e3
+ size 5252
runs/Jan13_13-47-25_aatrox-B560M-SILVER/events.out.tfevents.1736772446.aatrox-B560M-SILVER.10463.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4879735557ce97f744479f8206e418f454c70f4b2064d88cd2ce44c361b88bf1
+ size 5252
runs/Jan13_13-53-01_aatrox-B560M-SILVER/events.out.tfevents.1736772782.aatrox-B560M-SILVER.10876.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7c1fbc0a8271523c73a385679397c4827555cdf3bb9fecf21faacc7f90a9124
+ size 5252
runs/Jan13_13-57-11_aatrox-B560M-SILVER/events.out.tfevents.1736773032.aatrox-B560M-SILVER.11224.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b46b6488eff45727044a2760e4ed07fb11c6b0095bd7d3afaecc754b9967767a
+ size 5252
runs/Jan13_14-04-19_aatrox-B560M-SILVER/events.out.tfevents.1736773460.aatrox-B560M-SILVER.11822.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:223c948f35732adc67d2f734dd7349fefa6f692dce0fa89c8593a4bddfe80ee7
+ size 5252
runs/Jan13_14-04-35_aatrox-B560M-SILVER/events.out.tfevents.1736773476.aatrox-B560M-SILVER.12141.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad52b374363ecdac851c962b269e4070e18cc90de0adf7698c2251da74bf3b4e
+ size 326185
runs/Jan13_14-04-35_aatrox-B560M-SILVER/events.out.tfevents.1736844506.aatrox-B560M-SILVER.12141.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:96c03ba751b520e137a4d082a086abc806e51ca5a993b3a029d9812af1846052
+ size 734
test_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 9.099400399733511,
+   "eval_accuracy": 0.5269582909460834,
+   "eval_loss": 1.6447620391845703,
+   "eval_runtime": 947.2239,
+   "eval_samples_per_second": 3.113,
+   "eval_steps_per_second": 0.39
+ }
trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb521525eab2f9a8e74db26d353f174cef91135cb791b55dfd035ff53e167b49
+ size 5304