danaaubakirova (HF staff) committed on
Commit 1e57021
1 parent: d32a8ca

Upload MPLUGDocOwlForConditionalGeneration

model-00001-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2eee20a0799088cacd1784ad368ef3854358f7a4c2e4757bd66e05c2e2a751cc
-size 4987659872
+oid sha256:c8687e91a4b2db96ce32d73ef9e10521ccee594cef1f682e9ffdd7ac2b918535
+size 4987659896
model-00004-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:42b82cfe98bc69c8cdc304e772b197b15c97bc5f5d2b03414064d275117107b6
+oid sha256:b3ec5e148ac6fee5849353b9a1090fd615daba5f50bfc6b91427d980741e6e5f
 size 1296675232
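The two files above are Git LFS pointer files, not the weights themselves: each records the sha256 oid and byte size of the shard stored in LFS. A minimal sketch for checking a downloaded shard against its pointer, assuming the shard has been fetched into the working directory (the local path is an assumption, not part of this commit):

import hashlib

def lfs_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in chunks and compute its sha256, as Git LFS does."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected oid taken from the updated pointer file above.
expected_oid = "c8687e91a4b2db96ce32d73ef9e10521ccee594cef1f682e9ffdd7ac2b918535"
assert lfs_sha256("model-00001-of-00004.safetensors") == expected_oid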
model.safetensors.index.json CHANGED
@@ -434,6 +434,8 @@
     "vision_tower.vision_model.embeddings.class_embedding": "model-00001-of-00004.safetensors",
     "vision_tower.vision_model.embeddings.patch_embedding.weight": "model-00001-of-00004.safetensors",
     "vision_tower.vision_model.embeddings.position_embedding": "model-00001-of-00004.safetensors",
+    "vision_tower.vision_model.embeddings.pre_layernorm.bias": "model-00001-of-00004.safetensors",
+    "vision_tower.vision_model.embeddings.pre_layernorm.weight": "model-00001-of-00004.safetensors",
     "vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "model-00001-of-00004.safetensors",
     "vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "model-00001-of-00004.safetensors",
     "vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "model-00001-of-00004.safetensors",
@@ -723,8 +725,6 @@
     "vision_tower.vision_model.encoder.layers.9.self_attn.q_v_k_proj.bias": "model-00001-of-00004.safetensors",
     "vision_tower.vision_model.encoder.layers.9.self_attn.q_v_k_proj.weight": "model-00001-of-00004.safetensors",
     "vision_tower.vision_model.post_layernorm.bias": "model-00001-of-00004.safetensors",
-    "vision_tower.vision_model.post_layernorm.weight": "model-00001-of-00004.safetensors",
-    "vision_tower.vision_model.pre_layernorm.bias": "model-00001-of-00004.safetensors",
-    "vision_tower.vision_model.pre_layernorm.weight": "model-00001-of-00004.safetensors"
+    "vision_tower.vision_model.post_layernorm.weight": "model-00001-of-00004.safetensors"
   }
 }
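The index's weight_map ties each parameter name to the shard file that stores it, which is how sharded safetensors checkpoints are resolved at load time; this commit moves the pre_layernorm entries under embeddings to match the new key names in the shards. A minimal sketch of that lookup, assuming the repository files are available locally (the tensor name is taken from the hunk above):

import json
from safetensors import safe_open

# weight_map: parameter name -> shard filename
with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "vision_tower.vision_model.embeddings.pre_layernorm.weight"
shard = index["weight_map"][name]  # -> "model-00001-of-00004.safetensors"

# Open only the shard that holds this tensor and read it.
with safe_open(shard, framework="pt") as f:
    tensor = f.get_tensor(name)
print(tensor.shape)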