Started at: 15:20:16
({'_name_or_path': '/disk4/folder1/working/checkpoints/huggingface/native_pytorch/step4_8/',
  'attention_probs_dropout_prob': 0.1,
  'directionality': 'bidi',
  'gradient_checkpointing': False,
  'hidden_act': 'gelu',
  'hidden_dropout_prob': 0.1,
  'hidden_size': 768,
  'initializer_range': 0.02,
  'intermediate_size': 3072,
  'layer_norm_eps': 1e-12,
  'max_position_embeddings': 512,
  'model_type': 'bert',
  'num_attention_heads': 12,
  'num_hidden_layers': 12,
  'pad_token_id': 0,
  'pooler_fc_size': 768,
  'pooler_num_attention_heads': 12,
  'pooler_num_fc_layers': 3,
  'pooler_size_per_head': 128,
  'pooler_type': 'first_token_transform',
  'position_embedding_type': 'absolute',
  'type_vocab_size': 2,
  'vocab_size': 119547,
  '_commit_hash': '82b194c0b3ea1fcad65f1eceee04adb26f9f71ac'},
 {})
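The configuration above corresponds to a BERT-base multilingual encoder (12 layers, hidden size 768, vocab size 119547). A minimal sketch of how such a config might be instantiated for a regression-style fine-tune follows; the use of the Hugging Face transformers library, the BertForSequenceClassification head, and the public checkpoint name are assumptions inferred from the config and the MAE metric below, not taken from this log.

# Minimal sketch, assuming the Hugging Face transformers library.
# Checkpoint name and regression head are assumptions, not from this log.
from transformers import BertConfig, BertForSequenceClassification

config = BertConfig.from_pretrained(
    "bert-base-multilingual-cased",  # matches vocab_size=119547, 12 layers, hidden_size=768
    num_labels=1,                    # single continuous output (MAE is reported below)
    problem_type="regression",       # regression loss on the single logit
)
model = BertForSequenceClassification.from_pretrained(
    "bert-base-multilingual-cased",
    config=config,
)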
Epoch: 0
Training loss: 0.6578984168859628 - MAE: 0.6674722109137042
Validation loss : 0.32113080024719237 - MAE: 0.45643225009255417
Epoch: 1
Training loss: 0.25543879431027633 - MAE: 0.3910323237625205
Validation loss : 0.2070162296295166 - MAE: 0.3427800552704421
Epoch: 2
Training loss: 0.20022909687115595 - MAE: 0.34185446647043155
Validation loss : 0.1910509407520294 - MAE: 0.3408369084923425
Epoch: 3
Training loss: 0.18548667545502 - MAE: 0.324754784674793
Validation loss : 0.17563332319259645 - MAE: 0.32411278291801865
Epoch: 4
Training loss: 0.17486068262503698 - MAE: 0.3197417418736398
Validation loss : 0.16728453636169432 - MAE: 0.31319684318909913
Epoch: 5
Training loss: 0.16917219643409437 - MAE: 0.3116997644461795
Validation loss : 0.16494652032852172 - MAE: 0.31156720464741927
Epoch: 6
Training loss: 0.1663729869402372 - MAE: 0.3068122622576569
Validation loss : 0.16140909492969513 - MAE: 0.30731822951750715
Epoch: 7
Training loss: 0.16431669317759 - MAE: 0.3058750525941657
Validation loss : 0.15877128541469573 - MAE: 0.30407342135000287
Epoch: 8
Training loss: 0.16241646844607133 - MAE: 0.3041819685850804
Validation loss : 0.1569153904914856 - MAE: 0.3017167806049155
Epoch: 9
Training loss: 0.16105010303167197 - MAE: 0.3032572371335961
Validation loss : 0.15558656454086303 - MAE: 0.3001891695223057
Epoch: 10
Training loss: 0.15921874344348907 - MAE: 0.3000818604914875
Validation loss : 0.15467585921287536 - MAE: 0.29914423021883907
Epoch: 11
Training loss: 0.15774007026965803 - MAE: 0.2982069839270297
Validation loss : 0.1533532589673996 - MAE: 0.297580975135282
Epoch: 12
Training loss: 0.15732021515186018 - MAE: 0.2976011320081585
Validation loss : 0.15192450284957887 - MAE: 0.29573920735107395
Epoch: 13
Training loss: 0.15720971043293291 - MAE: 0.29652890957399775
Validation loss : 0.1509649246931076 - MAE: 0.29458318810555245
Epoch: 14
Training loss: 0.15484117773862985 - MAE: 0.29611318781416973
Validation loss : 0.1503614068031311 - MAE: 0.2939076775146755
Epoch: 15
Training loss: 0.155086215872031 - MAE: 0.29664500405408706
Validation loss : 0.15024077594280244 - MAE: 0.29392331689441004
Epoch: 16
Training loss: 0.15504858241631433 - MAE: 0.2957256243301222
Validation loss : 0.1485931918025017 - MAE: 0.2914224770774145
Epoch: 17