{
  "metadata": {
    "total_size": 15569685504
  },
  "weight_map": {
    "transformer.blocks.0.attn.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.0.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.0.attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.0.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.0.ffn.down_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.0.ffn.down_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.0.ffn.up_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.0.ffn.up_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.0.norm_1.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.0.norm_1.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.0.norm_2.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.0.norm_2.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.1.attn.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.1.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.1.attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.1.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.1.ffn.down_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.1.ffn.down_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.1.ffn.up_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.1.ffn.up_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.1.norm_1.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.1.norm_1.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.1.norm_2.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.1.norm_2.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.10.attn.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.10.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.10.attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.10.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.10.ffn.down_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.10.ffn.down_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.10.ffn.up_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.10.ffn.up_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.10.norm_1.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.10.norm_1.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.10.norm_2.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.10.norm_2.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.11.attn.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.11.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.11.attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.11.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.11.ffn.down_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.11.ffn.down_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.11.ffn.up_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.11.ffn.up_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.11.norm_1.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.11.norm_1.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.11.norm_2.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.11.norm_2.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.12.attn.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.12.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.12.attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.12.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.12.ffn.down_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.12.ffn.down_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.12.ffn.up_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.12.ffn.up_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.12.norm_1.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.12.norm_1.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.12.norm_2.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.12.norm_2.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.13.attn.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.13.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.13.attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.13.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.13.ffn.down_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.13.ffn.down_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.13.ffn.up_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.13.ffn.up_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.13.norm_1.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.13.norm_1.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.13.norm_2.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.13.norm_2.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.14.attn.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.14.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.14.attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.14.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.14.ffn.down_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.14.ffn.down_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.14.ffn.up_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.14.ffn.up_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.14.norm_1.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.14.norm_1.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.14.norm_2.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.14.norm_2.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.15.attn.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.15.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.15.attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.15.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.15.ffn.down_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.15.ffn.down_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.15.ffn.up_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.15.ffn.up_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.15.norm_1.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.15.norm_1.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.15.norm_2.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.15.norm_2.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.16.attn.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.16.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.16.attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.16.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.16.ffn.down_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.16.ffn.down_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.16.ffn.up_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.16.ffn.up_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.16.norm_1.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.16.norm_1.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.16.norm_2.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.16.norm_2.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.17.attn.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.17.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.17.attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.17.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.17.ffn.down_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.17.ffn.down_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.17.ffn.up_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.17.ffn.up_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.17.norm_1.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.17.norm_1.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.17.norm_2.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.17.norm_2.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.18.attn.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.18.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.18.attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.18.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.18.ffn.down_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.18.ffn.down_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.18.ffn.up_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.18.ffn.up_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.18.norm_1.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.18.norm_1.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.18.norm_2.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.18.norm_2.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.19.attn.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.19.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.19.attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.19.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.19.ffn.down_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.19.ffn.down_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.19.ffn.up_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.19.ffn.up_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.19.norm_1.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.19.norm_1.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.19.norm_2.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.19.norm_2.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.2.attn.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.2.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.2.attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.2.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.2.ffn.down_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.2.ffn.down_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.2.ffn.up_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.2.ffn.up_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.2.norm_1.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.2.norm_1.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.2.norm_2.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.2.norm_2.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.20.attn.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.20.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.20.attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.20.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.20.ffn.down_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.20.ffn.down_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.20.ffn.up_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.20.ffn.up_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.20.norm_1.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.20.norm_1.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.20.norm_2.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.20.norm_2.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.21.attn.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.21.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.21.attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.21.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.21.ffn.down_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.21.ffn.down_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.21.ffn.up_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.21.ffn.up_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.21.norm_1.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.21.norm_1.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.21.norm_2.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.21.norm_2.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.22.attn.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.22.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.22.attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.22.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.22.ffn.down_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.22.ffn.down_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.22.ffn.up_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.22.ffn.up_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.22.norm_1.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.22.norm_1.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.22.norm_2.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.22.norm_2.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.23.attn.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.23.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.23.attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.23.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.23.ffn.down_proj.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.23.ffn.down_proj.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.23.ffn.up_proj.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.23.ffn.up_proj.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.23.norm_1.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.23.norm_1.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.23.norm_2.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.23.norm_2.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.24.attn.Wqkv.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.24.attn.Wqkv.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.24.attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.24.attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.24.ffn.down_proj.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.24.ffn.down_proj.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.24.ffn.up_proj.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.24.ffn.up_proj.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.24.norm_1.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.24.norm_1.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.24.norm_2.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.24.norm_2.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.25.attn.Wqkv.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.25.attn.Wqkv.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.25.attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.25.attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.25.ffn.down_proj.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.25.ffn.down_proj.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.25.ffn.up_proj.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.25.ffn.up_proj.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.25.norm_1.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.25.norm_1.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.25.norm_2.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.25.norm_2.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.26.attn.Wqkv.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.26.attn.Wqkv.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.26.attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.26.attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.26.ffn.down_proj.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.26.ffn.down_proj.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.26.ffn.up_proj.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.26.ffn.up_proj.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.26.norm_1.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.26.norm_1.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.26.norm_2.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.26.norm_2.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.27.attn.Wqkv.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.27.attn.Wqkv.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.27.attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.27.attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.27.ffn.down_proj.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.27.ffn.down_proj.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.27.ffn.up_proj.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.27.ffn.up_proj.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.27.norm_1.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.27.norm_1.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.27.norm_2.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.27.norm_2.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.28.attn.Wqkv.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.28.attn.Wqkv.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.28.attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.28.attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.28.ffn.down_proj.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.28.ffn.down_proj.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.28.ffn.up_proj.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.28.ffn.up_proj.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.28.norm_1.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.28.norm_1.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.28.norm_2.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.28.norm_2.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.29.attn.Wqkv.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.29.attn.Wqkv.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.29.attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.29.attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.29.ffn.down_proj.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.29.ffn.down_proj.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.29.ffn.up_proj.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.29.ffn.up_proj.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.29.norm_1.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.29.norm_1.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.29.norm_2.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.29.norm_2.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.3.attn.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.3.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.3.attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.3.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.3.ffn.down_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.3.ffn.down_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.3.ffn.up_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.3.ffn.up_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.3.norm_1.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.3.norm_1.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.3.norm_2.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.3.norm_2.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.30.attn.Wqkv.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.30.attn.Wqkv.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.30.attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.30.attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.30.ffn.down_proj.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.30.ffn.down_proj.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.30.ffn.up_proj.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.30.ffn.up_proj.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.30.norm_1.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.30.norm_1.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.30.norm_2.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.30.norm_2.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.31.attn.Wqkv.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.31.attn.Wqkv.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.31.attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.31.attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.31.ffn.down_proj.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.31.ffn.down_proj.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.31.ffn.up_proj.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.31.ffn.up_proj.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.31.norm_1.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.31.norm_1.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.31.norm_2.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.31.norm_2.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.blocks.4.attn.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.4.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.4.attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.4.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.4.ffn.down_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.4.ffn.down_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.4.ffn.up_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.4.ffn.up_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.4.norm_1.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.4.norm_1.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.4.norm_2.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.4.norm_2.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.5.attn.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.5.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.5.attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.5.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.5.ffn.down_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.5.ffn.down_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.5.ffn.up_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.5.ffn.up_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.5.norm_1.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.5.norm_1.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.5.norm_2.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.5.norm_2.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.6.attn.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.6.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.6.attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.6.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.6.ffn.down_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.6.ffn.down_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.6.ffn.up_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.6.ffn.up_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.6.norm_1.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.6.norm_1.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.6.norm_2.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.6.norm_2.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.7.attn.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.7.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.7.attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.7.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.7.ffn.down_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.7.ffn.down_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.7.ffn.up_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.7.ffn.up_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.7.norm_1.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.7.norm_1.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.7.norm_2.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.7.norm_2.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.8.attn.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.8.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.8.attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.8.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.8.ffn.down_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.8.ffn.down_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.8.ffn.up_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.8.ffn.up_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.8.norm_1.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.8.norm_1.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.8.norm_2.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.8.norm_2.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.9.attn.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.9.attn.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.9.attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.9.attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.9.ffn.down_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.9.ffn.down_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.9.ffn.up_proj.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.9.ffn.up_proj.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.9.norm_1.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.9.norm_1.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.9.norm_2.bias": "pytorch_model-00001-of-00002.bin",
    "transformer.blocks.9.norm_2.weight": "pytorch_model-00001-of-00002.bin",
    "transformer.mm_projector.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.mm_projector.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.norm_f.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.norm_f.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.0.attention.attention.key.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.0.attention.attention.key.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.0.attention.attention.query.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.0.attention.attention.query.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.0.attention.attention.value.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.0.attention.attention.value.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.0.attention.output.LayerNorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.0.attention.output.LayerNorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.0.attention.output.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.0.attention.output.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.0.crossattention.attention.key.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.0.crossattention.attention.key.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.0.crossattention.attention.query.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.0.crossattention.attention.query.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.0.crossattention.attention.value.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.0.crossattention.attention.value.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.0.crossattention.output.LayerNorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.0.crossattention.output.LayerNorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.0.crossattention.output.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.0.crossattention.output.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.0.intermediate_query.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.0.intermediate_query.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.0.output_query.LayerNorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.0.output_query.LayerNorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.0.output_query.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.0.output_query.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.1.attention.attention.key.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.1.attention.attention.key.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.1.attention.attention.query.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.1.attention.attention.query.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.1.attention.attention.value.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.1.attention.attention.value.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.1.attention.output.LayerNorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.1.attention.output.LayerNorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.1.attention.output.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.1.attention.output.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.1.intermediate_query.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.1.intermediate_query.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.1.output_query.LayerNorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.1.output_query.LayerNorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.1.output_query.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.1.output_query.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.10.attention.attention.key.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.10.attention.attention.key.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.10.attention.attention.query.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.10.attention.attention.query.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.10.attention.attention.value.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.10.attention.attention.value.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.10.attention.output.LayerNorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.10.attention.output.LayerNorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.10.attention.output.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.10.attention.output.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.10.crossattention.attention.key.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.10.crossattention.attention.key.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.10.crossattention.attention.query.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.10.crossattention.attention.query.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.10.crossattention.attention.value.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.10.crossattention.attention.value.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.10.crossattention.output.LayerNorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.10.crossattention.output.LayerNorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.10.crossattention.output.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.10.crossattention.output.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.10.intermediate_query.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.10.intermediate_query.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.10.output_query.LayerNorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.10.output_query.LayerNorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.10.output_query.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.10.output_query.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.11.attention.attention.key.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.11.attention.attention.key.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.11.attention.attention.query.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.11.attention.attention.query.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.11.attention.attention.value.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.11.attention.attention.value.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.11.attention.output.LayerNorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.11.attention.output.LayerNorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.11.attention.output.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.11.attention.output.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.11.intermediate_query.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.11.intermediate_query.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.11.output_query.LayerNorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.11.output_query.LayerNorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.11.output_query.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.11.output_query.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.2.attention.attention.key.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.2.attention.attention.key.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.2.attention.attention.query.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.2.attention.attention.query.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.2.attention.attention.value.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.2.attention.attention.value.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.2.attention.output.LayerNorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.2.attention.output.LayerNorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.2.attention.output.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.2.attention.output.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.2.crossattention.attention.key.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.2.crossattention.attention.key.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.2.crossattention.attention.query.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.2.crossattention.attention.query.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.2.crossattention.attention.value.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.2.crossattention.attention.value.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.2.crossattention.output.LayerNorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.2.crossattention.output.LayerNorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.2.crossattention.output.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.2.crossattention.output.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.2.intermediate_query.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.2.intermediate_query.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.2.output_query.LayerNorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.2.output_query.LayerNorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.2.output_query.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.2.output_query.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.3.attention.attention.key.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.3.attention.attention.key.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.3.attention.attention.query.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.3.attention.attention.query.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.3.attention.attention.value.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.3.attention.attention.value.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.3.attention.output.LayerNorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.3.attention.output.LayerNorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.3.attention.output.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.3.attention.output.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.3.intermediate_query.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.3.intermediate_query.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.3.output_query.LayerNorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.3.output_query.LayerNorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.3.output_query.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.3.output_query.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.4.attention.attention.key.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.4.attention.attention.key.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.4.attention.attention.query.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.4.attention.attention.query.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.4.attention.attention.value.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.4.attention.attention.value.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.4.attention.output.LayerNorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.4.attention.output.LayerNorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.4.attention.output.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.4.attention.output.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.4.crossattention.attention.key.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.4.crossattention.attention.key.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.4.crossattention.attention.query.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.4.crossattention.attention.query.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.4.crossattention.attention.value.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.4.crossattention.attention.value.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.4.crossattention.output.LayerNorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.4.crossattention.output.LayerNorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.4.crossattention.output.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.4.crossattention.output.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.4.intermediate_query.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.4.intermediate_query.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.4.output_query.LayerNorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.4.output_query.LayerNorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.4.output_query.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.4.output_query.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.5.attention.attention.key.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.5.attention.attention.key.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.5.attention.attention.query.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.5.attention.attention.query.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.5.attention.attention.value.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.5.attention.attention.value.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.5.attention.output.LayerNorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.5.attention.output.LayerNorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.5.attention.output.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.5.attention.output.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.5.intermediate_query.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.5.intermediate_query.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.5.output_query.LayerNorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.5.output_query.LayerNorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.5.output_query.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.5.output_query.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.6.attention.attention.key.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.6.attention.attention.key.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.6.attention.attention.query.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.6.attention.attention.query.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.6.attention.attention.value.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.6.attention.attention.value.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.6.attention.output.LayerNorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.6.attention.output.LayerNorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.6.attention.output.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.6.attention.output.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.6.crossattention.attention.key.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.6.crossattention.attention.key.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.6.crossattention.attention.query.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.6.crossattention.attention.query.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.6.crossattention.attention.value.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.6.crossattention.attention.value.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.6.crossattention.output.LayerNorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.6.crossattention.output.LayerNorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.6.crossattention.output.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.6.crossattention.output.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.6.intermediate_query.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.6.intermediate_query.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.6.output_query.LayerNorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.6.output_query.LayerNorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.6.output_query.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.6.output_query.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.7.attention.attention.key.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.7.attention.attention.key.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.7.attention.attention.query.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.7.attention.attention.query.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.7.attention.attention.value.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.7.attention.attention.value.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.7.attention.output.LayerNorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.7.attention.output.LayerNorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.7.attention.output.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.7.attention.output.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.7.intermediate_query.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.7.intermediate_query.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.7.output_query.LayerNorm.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.7.output_query.LayerNorm.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.7.output_query.dense.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.7.output_query.dense.weight": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.8.attention.attention.key.bias": "pytorch_model-00002-of-00002.bin",
    "transformer.vision_tower.vision_tower.qformer.encoder.layer.8.attention.attention.key.weight": "pytorch_model-00002-of-00002.bin",
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.8.attention.attention.query.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.8.attention.attention.query.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.8.attention.attention.value.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.8.attention.attention.value.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.8.attention.output.LayerNorm.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.8.attention.output.LayerNorm.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.8.attention.output.dense.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.8.attention.output.dense.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.8.crossattention.attention.key.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.8.crossattention.attention.key.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.8.crossattention.attention.query.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.8.crossattention.attention.query.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.8.crossattention.attention.value.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.8.crossattention.attention.value.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.8.crossattention.output.LayerNorm.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.8.crossattention.output.LayerNorm.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.8.crossattention.output.dense.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.8.crossattention.output.dense.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.8.intermediate_query.dense.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.8.intermediate_query.dense.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.8.output_query.LayerNorm.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.8.output_query.LayerNorm.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.8.output_query.dense.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.8.output_query.dense.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.9.attention.attention.key.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.9.attention.attention.key.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.9.attention.attention.query.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.9.attention.attention.query.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.9.attention.attention.value.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.9.attention.attention.value.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.9.attention.output.LayerNorm.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.9.attention.output.LayerNorm.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.9.attention.output.dense.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.9.attention.output.dense.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.9.intermediate_query.dense.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.9.intermediate_query.dense.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.9.output_query.LayerNorm.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.9.output_query.LayerNorm.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.9.output_query.dense.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.encoder.layer.9.output_query.dense.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.layernorm.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.qformer.layernorm.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.query_tokens": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.embeddings.class_embedding": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.embeddings.patch_embedding.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.embeddings.patch_embedding.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.embeddings.position_embedding": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.24.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.24.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.24.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.24.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.24.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.24.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.24.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.24.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.24.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.24.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.24.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.24.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.25.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.25.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.25.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.25.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.25.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.25.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.25.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.25.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.25.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.25.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.25.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.25.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.26.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.26.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.26.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.26.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.26.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.26.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.26.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.26.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.26.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.26.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.26.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.26.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.27.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.27.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.27.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.27.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.27.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.27.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.27.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.27.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.27.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.27.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.27.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.27.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.28.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.28.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.28.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.28.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.28.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.28.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.28.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.28.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.28.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.28.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.28.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.28.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.29.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.29.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.29.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.29.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.29.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.29.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.29.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.29.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.29.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.29.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.29.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.29.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.30.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.30.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.30.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.30.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.30.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.30.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.30.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.30.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.30.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.30.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.30.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.30.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.31.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.31.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.31.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.31.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.31.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.31.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.31.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.31.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.31.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.31.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.31.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.31.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.32.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.32.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.32.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.32.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.32.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.32.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.32.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.32.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.32.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.32.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.32.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.32.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.33.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.33.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.33.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.33.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.33.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.33.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.33.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.33.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.33.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.33.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.33.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.33.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.34.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.34.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.34.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.34.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.34.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.34.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.34.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.34.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.34.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.34.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.34.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.34.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.35.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.35.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.35.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.35.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.35.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.35.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.35.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.35.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.35.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.35.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.35.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.35.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.36.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.36.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.36.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.36.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.36.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.36.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.36.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.36.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.36.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.36.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.36.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.36.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.37.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.37.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.37.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.37.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.37.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.37.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.37.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.37.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.37.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.37.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.37.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.37.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.38.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.38.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.38.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.38.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.38.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.38.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.38.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.38.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.38.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.38.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.38.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.38.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.projection.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.projection.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.qkv.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.qkv.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.post_layernorm.bias": "pytorch_model-00002-of-00002.bin", |
|
"transformer.vision_tower.vision_tower.vision_model.post_layernorm.weight": "pytorch_model-00002-of-00002.bin", |
|
"transformer.wte.weight": "pytorch_model-00001-of-00002.bin" |
|
} |
|
} |
|
|