pixba / config.json
{
  "architectures": [
    "PIXBAForPreTraining"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bias": false,
  "cache_dir": null,
  "conv_bias": true,
  "d_conv": 4,
  "d_model": 768,
  "d_state": 16,
  "decoder_hidden_size": 512,
  "decoder_intermediate_size": 2048,
  "decoder_num_attention_heads": 16,
  "decoder_num_hidden_layers": 8,
  "device": null,
  "dt_init": "random",
  "dt_init_floor": 0.0001,
  "dt_max": 0.1,
  "dt_min": 0.001,
  "dt_rank": "auto",
  "dt_scale": 1.0,
  "dtype": null,
  "expand": 2,
  "fused_add_norm": true,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "image_size": [
    16,
    8464
  ],
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-12,
  "mask_ratio": 0.25,
  "model_type": "pixel",
  "norm_pix_loss": true,
  "num_attention_heads": 12,
  "num_channels": 3,
  "num_decoder_layers": 2,
  "num_encoder_layers": 4,
  "num_hidden_layers": 12,
  "num_patches": 529,
  "pad_vocab_size_multiple": 8,
  "patch_size": 16,
  "qkv_bias": true,
  "residual_in_fp32": true,
  "revision": "main",
  "rms_norm": true,
  "ssm_cfg": {},
  "torch_dtype": "float32",
  "transformers_version": "4.17.0",
  "use_auth_token": "redacted",
  "use_fast_path": true,
  "vocab_size": 50277
}
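
For reference, here is a minimal sketch of reading this file and sanity-checking the derived sizes. It assumes only that the generic `transformers.PretrainedConfig` can parse the JSON; `PIXBAForPreTraining` is a custom architecture that does not ship with stock transformers, and the `dt_rank` "auto" rule below is the convention from the Mamba reference implementation, not something stated in this file.

```python
# A minimal sketch, assuming the generic PretrainedConfig can hold these
# fields; PIXBAForPreTraining itself is a custom class.
import math
from transformers import PretrainedConfig

config = PretrainedConfig.from_json_file("config.json")

# Patch arithmetic from the fields above: a 16 x 8464 input cut into
# 16 x 16 patches gives (16 // 16) * (8464 // 16) = 1 * 529 patches,
# matching the stored num_patches value.
height, width = config.image_size
num_patches = (height // config.patch_size) * (width // config.patch_size)
assert num_patches == config.num_patches == 529

# Mamba-style derived sizes (assumption: the "auto" rule from the Mamba
# reference implementation, dt_rank = ceil(d_model / 16), and the usual
# inner width d_inner = expand * d_model).
d_inner = config.expand * config.d_model   # 2 * 768 = 1536
dt_rank = math.ceil(config.d_model / 16)   # ceil(768 / 16) = 48
print(d_inner, dt_rank)
```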