{ "architectures": [ "ReformerModelWithLMHead" ], "attention_head_size": 64, "attention_probs_dropout_prob": 0.1, "attn_type": "mixed", "axial_norm_std": 1.0, "axial_pos_embds": true, "axial_pos_embds_dim": [ 64, 192 ], "axial_pos_shape": [ 512, 1024 ], "chunk_length": 64, "chunk_size_feed_forward": 64, "chunk_size_lm_head": 64, "feed_forward_size": 512, "hidden_act": "gelu", "hidden_dropout_prob": 0.05, "hidden_size": 256, "initializer_range": 0.02, "intermediate_size": 3072, "is_decoder": true, "layer_norm_eps": 1e-12, "max_position_embeddings": 524288, "model_type": "bert", "num_attention_heads": 2, "num_buckets": null, "num_chunks_after": 0, "num_chunks_before": 1, "num_hashes": 4, "num_hidden_layers": 6, "output_past": true, "seed": 0, "sinusoidal_pos_embds": false, "type_vocab_size": 2, "vocab_size": 320 }