Started at: 15:49:06
nb-bert-base, 0.001, 64
Config: ({'_name_or_path': '/disk4/folder1/working/checkpoints/huggingface/native_pytorch/step4_8/', 'attention_probs_dropout_prob': 0.1, 'directionality': 'bidi', 'gradient_checkpointing': False, 'hidden_act': 'gelu', 'hidden_dropout_prob': 0.1, 'hidden_size': 768, 'initializer_range': 0.02, 'intermediate_size': 3072, 'layer_norm_eps': 1e-12, 'max_position_embeddings': 512, 'model_type': 'bert', 'num_attention_heads': 12, 'num_hidden_layers': 12, 'pad_token_id': 0, 'pooler_fc_size': 768, 'pooler_num_attention_heads': 12, 'pooler_num_fc_layers': 3, 'pooler_size_per_head': 128, 'pooler_type': 'first_token_transform', 'position_embedding_type': 'absolute', 'type_vocab_size': 2, 'vocab_size': 119547, '_commit_hash': '82b194c0b3ea1fcad65f1eceee04adb26f9f71ac'}, {})

Epoch: 0
Training loss: 0.25718529296643805 - MAE: 0.37999076735911946
Validation loss: 0.1592742403348287 - MAE: 0.3065637084501305
Epoch: 1
Training loss: 0.15984965507129226 - MAE: 0.3011462185386914
Validation loss: 0.14935108996701962 - MAE: 0.2937389535824439
Epoch: 2
Training loss: 0.15315821834585883 - MAE: 0.2948535177806519
Validation loss: 0.1472618676947825 - MAE: 0.2882854062636122
Epoch: 3
Training loss: 0.14876405646403631 - MAE: 0.2911724842484354
Validation loss: 0.14868425713344055 - MAE: 0.28793878924782046
Epoch: 4
Training loss: 0.14827223573670242 - MAE: 0.2906396281733135
Validation loss: 0.15348663451996716 - MAE: 0.2916254111596949
Epoch: 5
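For reference, a fine-tuning loop along the following lines would produce log output in the format above. This is a minimal sketch, not the actual training script: it assumes the task is single-output regression with MSE loss, that 0.001 is the learning rate and 64 the batch size, and that the base checkpoint is NbAiLab/nb-bert-base from the Hugging Face Hub; the toy dataset near the bottom is a placeholder and should be replaced by the real data.

# Minimal sketch of a fine-tuning loop matching the log format above.
# Assumptions (not confirmed by the log): regression task, MSE loss,
# lr = 0.001, batch size = 64, base model = NbAiLab/nb-bert-base.
import torch
from torch.utils.data import DataLoader, TensorDataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification

MODEL_NAME = "NbAiLab/nb-bert-base"   # assumed Hub id for nb-bert-base
LEARNING_RATE = 0.001
BATCH_SIZE = 64
EPOCHS = 6                            # epochs 0-5, as in the log above

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
# num_labels=1 gives a single regression output; problem_type selects MSE loss.
model = AutoModelForSequenceClassification.from_pretrained(
    MODEL_NAME, num_labels=1, problem_type="regression"
).to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr=LEARNING_RATE)

def run_epoch(loader, train=True):
    """Run one pass over the data and return mean loss and mean absolute error."""
    model.train(train)
    total_loss, total_mae, n_batches = 0.0, 0.0, 0
    for input_ids, attention_mask, labels in loader:
        input_ids = input_ids.to(device)
        attention_mask = attention_mask.to(device)
        labels = labels.to(device)
        with torch.set_grad_enabled(train):
            out = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
            if train:
                optimizer.zero_grad()
                out.loss.backward()
                optimizer.step()
        preds = out.logits.squeeze(-1)
        total_loss += out.loss.item()
        total_mae += (preds - labels).abs().mean().item()
        n_batches += 1
    return total_loss / n_batches, total_mae / n_batches

# Toy placeholder data so the sketch runs end to end; replace with the real dataset.
texts = ["eksempeltekst"] * 128
targets = torch.rand(128)
enc = tokenizer(texts, padding=True, truncation=True, max_length=512, return_tensors="pt")
dataset = TensorDataset(enc["input_ids"], enc["attention_mask"], targets)
train_loader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)
val_loader = DataLoader(dataset, batch_size=BATCH_SIZE)

for epoch in range(EPOCHS):
    train_loss, train_mae = run_epoch(train_loader, train=True)
    val_loss, val_mae = run_epoch(val_loader, train=False)
    print(f"Epoch: {epoch}")
    print(f"Training loss: {train_loss} - MAE: {train_mae}")
    print(f"Validation loss: {val_loss} - MAE: {val_mae}")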