{ "_name_or_path": "deepseek-ai/deepseek-coder-7b-base-v1.5", "architectures": [ "DetikzifyForCausalLM" ], "attention_bias": false, "attention_dropout": 0.0, "bos_token_id": 100000, "concat_patches": 2, "eos_token_id": 100015, "feature_layer": 26, "hidden_act": "silu", "hidden_size": 4096, "initializer_range": 0.02, "intermediate_size": 11008, "max_position_embeddings": 4096, "mm_hidden_size": 2304, "model_type": "detikzify", "num_attention_heads": 32, "num_hidden_layers": 30, "num_key_value_heads": 32, "num_patches": 364, "patch_token_id": 100000, "pretraining_tp": 1, "rms_norm_eps": 1e-06, "rope_scaling": null, "rope_theta": 10000.0, "tie_word_embeddings": false, "torch_dtype": "float32", "transformers_version": "4.38.1", "use_cache": true, "use_mm_proj": true, "vision_config": { "architecture": "vit_so400m_patch14_siglip_384", "classifier": "head", "crop_mode": "center", "crop_pct": 0.9, "custom_load": false, "first_conv": "patch_embed.proj", "fixed_input_size": true, "hf_hub_filename": "open_clip_pytorch_model.bin", "hf_hub_id": "timm/ViT-SO400M-14-SigLIP-384", "input_size": [ 3, 384, 384 ], "interpolation": "bicubic", "mean": [ 0.5, 0.5, 0.5 ], "num_classes": 0, "pool_size": null, "std": [ 0.5, 0.5, 0.5 ], "tag": "webli", "url": "" }, "vision_tower": "vit_so400m_patch14_siglip_384.webli", "vocab_size": 102400 }