visualglm-6b / pytorch_model.bin.index.json
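The JSON below is the index for a sharded PyTorch checkpoint: `metadata.total_size` records the total checkpoint size in bytes, and `weight_map` maps each tensor name to the shard file that stores it. As a minimal sketch (not this repo's own loading code), this is how such an index is typically consumed, assuming the five `pytorch_model-0000N-of-00005.bin` shards have been downloaded into the same directory as the index file:

```python
import json

import torch

# Read the shard index: tensor name -> shard filename.
with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)

weight_map = index["weight_map"]
print(index["metadata"]["total_size"])  # 17793851136 bytes, ~16.6 GiB over 5 shards

# Load each shard once and merge everything into a single state dict.
state_dict = {}
for shard in sorted(set(weight_map.values())):
    state_dict.update(torch.load(shard, map_location="cpu"))
```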
{
"metadata": {
"total_size": 17793851136
},
"weight_map": {
"image_encoder.glm_proj.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.glm_proj.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.final_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.final_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.0.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.0.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.0.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.0.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.0.cross_attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.0.cross_attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.0.cross_attention.key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.0.cross_attention.key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.0.cross_attention.query.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.0.cross_attention.query.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.0.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.0.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.0.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.0.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.0.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.0.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.0.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.0.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.0.post_cross_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.0.post_cross_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.1.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.1.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.1.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.1.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.1.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.1.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.1.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.1.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.1.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.1.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.1.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.1.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.10.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.10.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.10.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.10.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.10.cross_attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.10.cross_attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.10.cross_attention.key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.10.cross_attention.key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.10.cross_attention.query.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.10.cross_attention.query.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.10.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.10.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.10.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.10.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.10.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.10.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.10.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.10.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.10.post_cross_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.10.post_cross_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.11.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.11.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.11.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.11.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.11.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.11.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.11.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.11.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.11.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.11.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.11.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.11.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.2.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.2.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.2.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.2.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.2.cross_attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.2.cross_attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.2.cross_attention.key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.2.cross_attention.key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.2.cross_attention.query.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.2.cross_attention.query.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.2.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.2.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.2.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.2.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.2.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.2.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.2.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.2.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.2.post_cross_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.2.post_cross_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.3.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.3.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.3.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.3.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.3.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.3.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.3.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.3.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.3.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.3.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.3.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.3.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.4.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.4.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.4.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.4.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.4.cross_attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.4.cross_attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.4.cross_attention.key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.4.cross_attention.key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.4.cross_attention.query.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.4.cross_attention.query.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.4.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.4.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.4.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.4.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.4.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.4.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.4.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.4.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.4.post_cross_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.4.post_cross_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.5.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.5.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.5.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.5.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.5.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.5.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.5.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.5.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.5.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.5.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.5.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.5.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.6.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.6.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.6.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.6.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.6.cross_attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.6.cross_attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.6.cross_attention.key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.6.cross_attention.key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.6.cross_attention.query.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.6.cross_attention.query.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.6.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.6.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.6.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.6.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.6.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.6.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.6.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.6.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.6.post_cross_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.6.post_cross_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.7.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.7.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.7.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.7.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.7.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.7.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.7.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.7.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.7.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.7.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.7.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.7.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.8.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.8.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.8.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.8.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.8.cross_attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.8.cross_attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.8.cross_attention.key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.8.cross_attention.key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.8.cross_attention.query.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.8.cross_attention.query.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.8.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.8.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.8.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.8.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.8.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.8.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.8.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.8.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.8.post_cross_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.8.post_cross_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.9.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.9.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.9.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.9.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.9.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.9.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.9.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.9.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.9.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.9.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.9.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.layers.9.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.qformer.transformer.word_embeddings.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.mixins.cls.ln_vision.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.mixins.cls.ln_vision.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.mixins.patch_embedding.proj.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.mixins.patch_embedding.proj.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.0.attention.dense.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.0.attention.dense.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.0.attention.query_key_value.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.0.attention.query_key_value.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.0.input_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.0.input_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.0.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.0.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.0.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.0.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.0.post_attention_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.0.post_attention_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.1.attention.dense.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.1.attention.dense.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.1.attention.query_key_value.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.1.attention.query_key_value.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.1.input_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.1.input_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.1.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.1.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.1.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.1.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.1.post_attention_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.1.post_attention_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.10.attention.dense.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.10.attention.dense.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.10.attention.query_key_value.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.10.attention.query_key_value.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.10.input_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.10.input_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.10.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.10.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.10.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.10.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.10.post_attention_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.10.post_attention_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.11.attention.dense.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.11.attention.dense.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.11.attention.query_key_value.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.11.attention.query_key_value.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.11.input_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.11.input_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.11.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.11.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.11.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.11.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.11.post_attention_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.11.post_attention_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.12.attention.dense.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.12.attention.dense.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.12.attention.query_key_value.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.12.attention.query_key_value.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.12.input_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.12.input_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.12.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.12.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.12.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.12.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.12.post_attention_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.12.post_attention_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.13.attention.dense.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.13.attention.dense.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.13.attention.query_key_value.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.13.attention.query_key_value.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.13.input_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.13.input_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.13.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.13.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.13.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.13.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.13.post_attention_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.13.post_attention_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.14.attention.dense.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.14.attention.dense.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.14.attention.query_key_value.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.14.attention.query_key_value.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.14.input_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.14.input_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.14.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.14.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.14.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.14.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.14.post_attention_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.14.post_attention_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.15.attention.dense.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.15.attention.dense.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.15.attention.query_key_value.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.15.attention.query_key_value.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.15.input_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.15.input_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.15.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.15.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.15.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.15.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.15.post_attention_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.15.post_attention_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.16.attention.dense.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.16.attention.dense.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.16.attention.query_key_value.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.16.attention.query_key_value.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.16.input_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.16.input_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.16.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.16.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.16.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.16.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.16.post_attention_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.16.post_attention_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.17.attention.dense.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.17.attention.dense.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.17.attention.query_key_value.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.17.attention.query_key_value.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.17.input_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.17.input_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.17.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.17.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.17.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.17.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.17.post_attention_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.17.post_attention_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.18.attention.dense.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.18.attention.dense.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.18.attention.query_key_value.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.18.attention.query_key_value.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.18.input_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.18.input_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.18.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.18.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.18.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.18.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.18.post_attention_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.18.post_attention_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.19.attention.dense.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.19.attention.dense.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.19.attention.query_key_value.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.19.attention.query_key_value.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.19.input_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.19.input_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.19.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.19.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.19.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.19.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.19.post_attention_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.19.post_attention_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.2.attention.dense.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.2.attention.dense.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.2.attention.query_key_value.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.2.attention.query_key_value.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.2.input_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.2.input_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.2.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.2.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.2.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.2.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.2.post_attention_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.2.post_attention_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.20.attention.dense.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.20.attention.dense.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.20.attention.query_key_value.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.20.attention.query_key_value.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.20.input_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.20.input_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.20.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.20.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.20.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.20.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.20.post_attention_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.20.post_attention_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.21.attention.dense.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.21.attention.dense.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.21.attention.query_key_value.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.21.attention.query_key_value.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.21.input_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.21.input_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.21.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.21.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.21.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.21.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.21.post_attention_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.21.post_attention_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.22.attention.dense.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.22.attention.dense.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.22.attention.query_key_value.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.22.attention.query_key_value.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.22.input_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.22.input_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.22.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.22.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.22.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.22.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.22.post_attention_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.22.post_attention_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.23.attention.dense.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.23.attention.dense.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.23.attention.query_key_value.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.23.attention.query_key_value.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.23.input_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.23.input_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.23.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.23.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.23.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.23.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.23.post_attention_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.23.post_attention_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.24.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.24.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.24.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.24.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.24.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.24.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.24.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.24.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.24.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.24.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.24.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.24.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.25.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.25.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.25.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.25.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.25.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.25.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.25.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.25.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.25.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.25.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.25.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.25.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.26.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.26.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.26.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.26.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.26.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.26.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.26.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.26.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.26.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.26.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.26.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.26.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.27.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.27.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.27.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.27.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.27.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.27.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.27.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.27.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.27.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.27.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.27.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.27.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.28.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.28.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.28.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.28.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.28.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.28.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.28.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.28.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.28.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.28.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.28.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.28.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.29.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.29.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.29.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.29.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.29.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.29.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.29.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.29.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.29.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.29.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.29.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.29.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.3.attention.dense.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.3.attention.dense.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.3.attention.query_key_value.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.3.attention.query_key_value.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.3.input_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.3.input_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.3.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.3.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.3.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.3.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.3.post_attention_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.3.post_attention_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.30.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.30.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.30.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.30.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.30.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.30.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.30.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.30.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.30.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.30.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.30.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.30.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.31.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.31.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.31.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.31.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.31.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.31.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.31.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.31.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.31.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.31.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.31.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.31.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.32.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.32.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.32.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.32.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.32.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.32.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.32.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.32.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.32.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.32.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.32.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.32.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.33.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.33.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.33.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.33.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.33.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.33.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.33.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.33.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.33.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.33.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.33.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.33.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.34.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.34.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.34.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.34.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.34.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.34.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.34.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.34.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.34.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.34.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.34.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.34.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.35.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.35.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.35.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.35.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.35.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.35.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.35.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.35.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.35.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.35.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.35.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.35.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.36.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.36.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.36.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.36.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.36.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.36.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.36.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.36.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.36.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.36.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.36.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.36.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.37.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.37.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.37.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.37.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.37.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.37.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.37.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.37.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.37.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.37.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.37.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.37.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.38.attention.dense.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.38.attention.dense.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.38.attention.query_key_value.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.38.attention.query_key_value.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.38.input_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.38.input_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.38.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.38.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.38.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.38.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.38.post_attention_layernorm.bias": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.38.post_attention_layernorm.weight": "pytorch_model-00005-of-00005.bin",
"image_encoder.vit.transformer.layers.4.attention.dense.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.4.attention.dense.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.4.attention.query_key_value.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.4.attention.query_key_value.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.4.input_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.4.input_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.4.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.4.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.4.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.4.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.4.post_attention_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.4.post_attention_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.5.attention.dense.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.5.attention.dense.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.5.attention.query_key_value.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.5.attention.query_key_value.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.5.input_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.5.input_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.5.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.5.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.5.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.5.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.5.post_attention_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.5.post_attention_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.6.attention.dense.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.6.attention.dense.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.6.attention.query_key_value.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.6.attention.query_key_value.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.6.input_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.6.input_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.6.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.6.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.6.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.6.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.6.post_attention_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.6.post_attention_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.7.attention.dense.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.7.attention.dense.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.7.attention.query_key_value.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.7.attention.query_key_value.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.7.input_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.7.input_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.7.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.7.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.7.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.7.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.7.post_attention_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.7.post_attention_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.8.attention.dense.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.8.attention.dense.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.8.attention.query_key_value.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.8.attention.query_key_value.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.8.input_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.8.input_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.8.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.8.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.8.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.8.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.8.post_attention_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.8.post_attention_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.9.attention.dense.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.9.attention.dense.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.9.attention.query_key_value.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.9.attention.query_key_value.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.9.input_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.9.input_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.9.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.9.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.9.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.9.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.9.post_attention_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.layers.9.post_attention_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.position_embeddings.weight": "pytorch_model-00004-of-00005.bin",
"image_encoder.vit.transformer.word_embeddings.weight": "pytorch_model-00004-of-00005.bin",
"lm_head.weight": "pytorch_model-00004-of-00005.bin",
"transformer.final_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"transformer.final_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"transformer.layers.0.attention.dense.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.0.attention.dense.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.0.attention.query_key_value.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.0.attention.query_key_value.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.0.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00005.bin",
"transformer.layers.0.input_layernorm.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.0.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.0.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.0.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.0.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.0.post_attention_layernorm.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.1.attention.dense.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.1.attention.dense.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.1.attention.query_key_value.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.1.attention.query_key_value.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.1.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00005.bin",
"transformer.layers.1.input_layernorm.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.1.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.1.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.1.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.1.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.1.post_attention_layernorm.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.10.attention.dense.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.10.attention.dense.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.10.attention.query_key_value.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.10.attention.query_key_value.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.10.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00005.bin",
"transformer.layers.10.input_layernorm.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.10.input_layernorm.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.10.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.10.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.10.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.10.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.10.post_attention_layernorm.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.10.post_attention_layernorm.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.11.attention.dense.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.11.attention.dense.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.11.attention.query_key_value.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.11.attention.query_key_value.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.11.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00005.bin",
"transformer.layers.11.input_layernorm.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.11.input_layernorm.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.11.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.11.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.11.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.11.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.11.post_attention_layernorm.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.11.post_attention_layernorm.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.12.attention.dense.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.12.attention.dense.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.12.attention.query_key_value.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.12.attention.query_key_value.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.12.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00005.bin",
"transformer.layers.12.input_layernorm.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.12.input_layernorm.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.12.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.12.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.12.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.12.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.12.post_attention_layernorm.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.12.post_attention_layernorm.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.13.attention.dense.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.13.attention.dense.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.13.attention.query_key_value.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.13.attention.query_key_value.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.13.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00005.bin",
"transformer.layers.13.input_layernorm.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.13.input_layernorm.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.13.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.13.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.13.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.13.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.13.post_attention_layernorm.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.13.post_attention_layernorm.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.14.attention.dense.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.14.attention.dense.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.14.attention.query_key_value.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.14.attention.query_key_value.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.14.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00005.bin",
"transformer.layers.14.input_layernorm.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.14.input_layernorm.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.14.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.14.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.14.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.14.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.14.post_attention_layernorm.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.14.post_attention_layernorm.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.15.attention.dense.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.15.attention.dense.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.15.attention.query_key_value.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.15.attention.query_key_value.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.15.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00005.bin",
"transformer.layers.15.input_layernorm.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.15.input_layernorm.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.15.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.15.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.15.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.15.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.15.post_attention_layernorm.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.15.post_attention_layernorm.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.16.attention.dense.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.16.attention.dense.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.16.attention.query_key_value.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.16.attention.query_key_value.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.16.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00005.bin",
"transformer.layers.16.input_layernorm.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.16.input_layernorm.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.16.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.16.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.16.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.16.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.16.post_attention_layernorm.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.16.post_attention_layernorm.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.17.attention.dense.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.17.attention.dense.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.17.attention.query_key_value.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.17.attention.query_key_value.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.17.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00005.bin",
"transformer.layers.17.input_layernorm.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.17.input_layernorm.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.17.mlp.dense_4h_to_h.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.17.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.17.mlp.dense_h_to_4h.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.17.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.17.post_attention_layernorm.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.17.post_attention_layernorm.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.18.attention.dense.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.18.attention.dense.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.18.attention.query_key_value.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.18.attention.query_key_value.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.18.attention.rotary_emb.inv_freq": "pytorch_model-00003-of-00005.bin",
"transformer.layers.18.input_layernorm.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.18.input_layernorm.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.18.mlp.dense_4h_to_h.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.18.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.18.mlp.dense_h_to_4h.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.18.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.18.post_attention_layernorm.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.18.post_attention_layernorm.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.19.attention.dense.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.19.attention.dense.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.19.attention.query_key_value.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.19.attention.query_key_value.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.19.attention.rotary_emb.inv_freq": "pytorch_model-00003-of-00005.bin",
"transformer.layers.19.input_layernorm.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.19.input_layernorm.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.19.mlp.dense_4h_to_h.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.19.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.19.mlp.dense_h_to_4h.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.19.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.19.post_attention_layernorm.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.19.post_attention_layernorm.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.2.attention.dense.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.2.attention.dense.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.2.attention.query_key_value.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.2.attention.query_key_value.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.2.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00005.bin",
"transformer.layers.2.input_layernorm.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.2.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.2.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.2.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.2.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.2.post_attention_layernorm.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.20.attention.dense.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.20.attention.dense.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.20.attention.query_key_value.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.20.attention.query_key_value.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.20.attention.rotary_emb.inv_freq": "pytorch_model-00003-of-00005.bin",
"transformer.layers.20.input_layernorm.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.20.input_layernorm.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.20.mlp.dense_4h_to_h.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.20.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.20.mlp.dense_h_to_4h.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.20.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.20.post_attention_layernorm.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.20.post_attention_layernorm.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.21.attention.dense.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.21.attention.dense.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.21.attention.query_key_value.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.21.attention.query_key_value.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.21.attention.rotary_emb.inv_freq": "pytorch_model-00003-of-00005.bin",
"transformer.layers.21.input_layernorm.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.21.input_layernorm.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.21.mlp.dense_4h_to_h.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.21.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.21.mlp.dense_h_to_4h.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.21.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.21.post_attention_layernorm.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.21.post_attention_layernorm.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.22.attention.dense.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.22.attention.dense.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.22.attention.query_key_value.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.22.attention.query_key_value.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.22.attention.rotary_emb.inv_freq": "pytorch_model-00003-of-00005.bin",
"transformer.layers.22.input_layernorm.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.22.input_layernorm.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.22.mlp.dense_4h_to_h.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.22.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.22.mlp.dense_h_to_4h.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.22.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.22.post_attention_layernorm.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.22.post_attention_layernorm.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.23.attention.dense.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.23.attention.dense.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.23.attention.query_key_value.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.23.attention.query_key_value.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.23.attention.rotary_emb.inv_freq": "pytorch_model-00003-of-00005.bin",
"transformer.layers.23.input_layernorm.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.23.input_layernorm.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.23.mlp.dense_4h_to_h.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.23.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.23.mlp.dense_h_to_4h.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.23.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.23.post_attention_layernorm.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.23.post_attention_layernorm.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.24.attention.dense.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.24.attention.dense.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.24.attention.query_key_value.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.24.attention.query_key_value.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.24.attention.rotary_emb.inv_freq": "pytorch_model-00003-of-00005.bin",
"transformer.layers.24.input_layernorm.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.24.input_layernorm.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.24.mlp.dense_4h_to_h.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.24.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.24.mlp.dense_h_to_4h.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.24.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.24.post_attention_layernorm.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.24.post_attention_layernorm.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.25.attention.dense.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.25.attention.dense.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.25.attention.query_key_value.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.25.attention.query_key_value.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.25.attention.rotary_emb.inv_freq": "pytorch_model-00003-of-00005.bin",
"transformer.layers.25.input_layernorm.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.25.input_layernorm.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.25.mlp.dense_4h_to_h.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.25.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.25.mlp.dense_h_to_4h.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.25.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.25.post_attention_layernorm.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.25.post_attention_layernorm.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.26.attention.dense.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.26.attention.dense.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.26.attention.query_key_value.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.26.attention.query_key_value.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.26.attention.rotary_emb.inv_freq": "pytorch_model-00003-of-00005.bin",
"transformer.layers.26.input_layernorm.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.26.input_layernorm.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.26.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00005.bin",
"transformer.layers.26.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00005.bin",
"transformer.layers.26.mlp.dense_h_to_4h.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.26.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.26.post_attention_layernorm.bias": "pytorch_model-00003-of-00005.bin",
"transformer.layers.26.post_attention_layernorm.weight": "pytorch_model-00003-of-00005.bin",
"transformer.layers.27.attention.dense.bias": "pytorch_model-00004-of-00005.bin",
"transformer.layers.27.attention.dense.weight": "pytorch_model-00004-of-00005.bin",
"transformer.layers.27.attention.query_key_value.bias": "pytorch_model-00004-of-00005.bin",
"transformer.layers.27.attention.query_key_value.weight": "pytorch_model-00004-of-00005.bin",
"transformer.layers.27.attention.rotary_emb.inv_freq": "pytorch_model-00004-of-00005.bin",
"transformer.layers.27.input_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"transformer.layers.27.input_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"transformer.layers.27.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00005.bin",
"transformer.layers.27.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00005.bin",
"transformer.layers.27.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00005.bin",
"transformer.layers.27.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00005.bin",
"transformer.layers.27.post_attention_layernorm.bias": "pytorch_model-00004-of-00005.bin",
"transformer.layers.27.post_attention_layernorm.weight": "pytorch_model-00004-of-00005.bin",
"transformer.layers.3.attention.dense.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.3.attention.dense.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.3.attention.query_key_value.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.3.attention.query_key_value.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.3.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00005.bin",
"transformer.layers.3.input_layernorm.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.3.input_layernorm.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.3.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.3.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.3.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.3.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.3.post_attention_layernorm.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.4.attention.dense.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.4.attention.dense.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.4.attention.query_key_value.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.4.attention.query_key_value.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.4.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00005.bin",
"transformer.layers.4.input_layernorm.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.4.input_layernorm.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.4.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.4.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.4.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.4.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.4.post_attention_layernorm.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.5.attention.dense.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.5.attention.dense.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.5.attention.query_key_value.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.5.attention.query_key_value.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.5.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00005.bin",
"transformer.layers.5.input_layernorm.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.5.input_layernorm.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.5.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.5.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.5.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.5.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.5.post_attention_layernorm.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.6.attention.dense.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.6.attention.dense.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.6.attention.query_key_value.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.6.attention.query_key_value.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.6.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00005.bin",
"transformer.layers.6.input_layernorm.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.6.input_layernorm.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.6.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.6.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.6.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.6.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.6.post_attention_layernorm.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.6.post_attention_layernorm.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.7.attention.dense.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.7.attention.dense.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.7.attention.query_key_value.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.7.attention.query_key_value.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.7.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00005.bin",
"transformer.layers.7.input_layernorm.bias": "pytorch_model-00001-of-00005.bin",
"transformer.layers.7.input_layernorm.weight": "pytorch_model-00001-of-00005.bin",
"transformer.layers.7.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.7.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.7.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.7.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.7.post_attention_layernorm.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.7.post_attention_layernorm.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.8.attention.dense.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.8.attention.dense.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.8.attention.query_key_value.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.8.attention.query_key_value.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.8.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00005.bin",
"transformer.layers.8.input_layernorm.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.8.input_layernorm.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.8.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.8.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.8.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.8.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.8.post_attention_layernorm.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.8.post_attention_layernorm.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.9.attention.dense.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.9.attention.dense.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.9.attention.query_key_value.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.9.attention.query_key_value.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.9.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00005.bin",
"transformer.layers.9.input_layernorm.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.9.input_layernorm.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.9.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.9.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.9.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.9.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00005.bin",
"transformer.layers.9.post_attention_layernorm.bias": "pytorch_model-00002-of-00005.bin",
"transformer.layers.9.post_attention_layernorm.weight": "pytorch_model-00002-of-00005.bin",
"transformer.word_embeddings.weight": "pytorch_model-00001-of-00005.bin"
}
}