{
  "adapters": {
    "adapters": {},
    "config_map": {
      "d345095bcb01e325": {
        "adapter_residual_before_ln": false,
        "attention_type": null,
        "invertible_adapter": null,
        "leave_out": [],
        "ln_after": false,
        "ln_before": false,
        "mh_adapter": false,
        "new_attention_norm": null,
        "non_linearity": "gelu",
        "original_ln_after": true,
        "original_ln_before": true,
        "output_adapter": true,
        "reduction_factor": 2,
        "residual_before_ln": true
      },
      "text_lang": {
        "adapter_residual_before_ln": false,
        "attention_type": null,
        "invertible_adapter": null,
        "leave_out": [],
        "ln_after": false,
        "ln_before": false,
        "mh_adapter": false,
        "new_attention_norm": null,
        "non_linearity": "gelu",
        "original_ln_after": true,
        "original_ln_before": true,
        "output_adapter": true,
        "reduction_factor": 2,
        "residual_before_ln": true
      }
    }
  },
  "architectures": [
    "BertForMaskedLM"
  ],
  "attention_probs_dropout_prob": 0.1,
  "directionality": "bidi",
  "embeddings_type": "full",
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "pooler_fc_size": 768,
  "pooler_num_attention_heads": 12,
  "pooler_num_fc_layers": 3,
  "pooler_size_per_head": 128,
  "pooler_type": "first_token_transform",
  "type_vocab_size": 2,
  "vocab_size": 32000
}
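
For context, a minimal sketch of how a config like this is typically consumed. It assumes the Hugging Face `adapters` library (successor to adapter-transformers, whose legacy in-config "adapters"/"config_map" block appears above); the checkpoint path is hypothetical, not a confirmed hub id, and loading a legacy-format config may require conversion by the library.

# A minimal sketch, assuming the `adapters` library is installed
# (pip install adapters) and the checkpoint exists locally.
from transformers import AutoTokenizer
from adapters import AutoAdapterModel

ckpt = "path/to/tr-mbertmodel-monotok-adapter"  # hypothetical path

tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = AutoAdapterModel.from_pretrained(ckpt)

# Activate the "text_lang" adapter named in config_map above. With
# reduction_factor 2, each bottleneck is hidden_size / 2 = 384 units,
# inserted only after the feed-forward block (output_adapter: true,
# mh_adapter: false) with a GELU non-linearity.
model.set_active_adapters("text_lang")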