random-nllb-moe-2-experts / pytorch_model.bin.index.json
ArthurZ (HF staff) · Upload model · 0537bfe
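
The JSON below is the standard sharded-checkpoint index used to locate each tensor: `metadata.total_size` records the combined serialized size of the shards in bytes, and `weight_map` maps every parameter name to the `.bin` shard file that contains it. As a minimal sketch (not part of the repository, and assuming the index file has been downloaded locally under its usual name), it can be inspected directly:

```python
import json
from collections import Counter

# Load the shard index (assumes the file was downloaded locally,
# e.g. via huggingface_hub, keeping its original filename).
with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)

# Total serialized size of all shards, in bytes.
print("total_size:", index["metadata"]["total_size"])

# How many tensors each shard file contains.
for shard, count in sorted(Counter(index["weight_map"].values()).items()):
    print(f"{shard}: {count} tensors")

# Which shard holds one particular parameter (an entry from the map below).
name = "decoder.layers.11.ffn.router.classifier.weight"
print(name, "->", index["weight_map"][name])
```
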
{
"metadata": {
"total_size": 19205242880
},
"weight_map": {
"decoder.embed_positions.weights": "pytorch_model-00002-of-00002.bin",
"decoder.embed_tokens.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.0.cross_attention.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.0.cross_attention.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.0.cross_attention.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.0.cross_attention.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.0.cross_attention.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.0.cross_attention.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.0.cross_attention.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.0.cross_attention.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.0.cross_attention_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.0.cross_attention_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.0.ff_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.0.ff_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.0.ffn.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.0.ffn.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.0.ffn.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.0.ffn.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.0.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.0.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.0.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.0.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.0.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.0.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.0.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.0.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.0.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.0.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.1.cross_attention.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.1.cross_attention.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.1.cross_attention.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.1.cross_attention.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.1.cross_attention.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.1.cross_attention.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.1.cross_attention.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.1.cross_attention.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.1.cross_attention_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.1.cross_attention_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.1.ff_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.1.ff_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.1.ffn.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.1.ffn.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.1.ffn.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.1.ffn.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.1.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.1.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.1.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.1.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.1.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.1.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.1.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.1.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.1.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.1.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.10.cross_attention.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.10.cross_attention.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.10.cross_attention.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.10.cross_attention.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.10.cross_attention.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.10.cross_attention.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.10.cross_attention.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.10.cross_attention.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.10.cross_attention_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.10.cross_attention_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.10.ff_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.10.ff_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.10.ffn.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.10.ffn.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.10.ffn.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.10.ffn.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.10.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.10.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.10.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.10.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.10.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.10.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.10.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.10.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.10.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.10.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.cross_attention.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.cross_attention.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.cross_attention.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.cross_attention.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.cross_attention.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.cross_attention.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.cross_attention.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.cross_attention.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.cross_attention_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.cross_attention_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.ff_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.ff_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.ffn.experts.expert_0.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.ffn.experts.expert_0.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.ffn.experts.expert_0.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.ffn.experts.expert_0.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.ffn.experts.expert_1.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.ffn.experts.expert_1.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.ffn.experts.expert_1.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.ffn.experts.expert_1.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.ffn.router.classifier.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.11.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.12.cross_attention.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.12.cross_attention.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.12.cross_attention.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.12.cross_attention.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.12.cross_attention.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.12.cross_attention.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.12.cross_attention.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.12.cross_attention.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.12.cross_attention_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.12.cross_attention_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.12.ff_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.12.ff_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.12.ffn.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.12.ffn.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.12.ffn.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.12.ffn.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.12.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.12.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.12.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.12.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.12.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.12.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.12.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.12.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.12.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.12.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.13.cross_attention.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.13.cross_attention.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.13.cross_attention.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.13.cross_attention.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.13.cross_attention.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.13.cross_attention.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.13.cross_attention.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.13.cross_attention.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.13.cross_attention_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.13.cross_attention_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.13.ff_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.13.ff_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.13.ffn.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.13.ffn.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.13.ffn.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.13.ffn.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.13.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.13.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.13.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.13.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.13.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.13.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.13.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.13.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.13.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.13.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.14.cross_attention.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.14.cross_attention.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.14.cross_attention.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.14.cross_attention.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.14.cross_attention.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.14.cross_attention.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.14.cross_attention.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.14.cross_attention.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.14.cross_attention_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.14.cross_attention_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.14.ff_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.14.ff_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.14.ffn.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.14.ffn.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.14.ffn.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.14.ffn.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.14.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.14.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.14.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.14.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.14.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.14.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.14.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.14.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.14.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.14.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.cross_attention.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.cross_attention.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.cross_attention.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.cross_attention.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.cross_attention.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.cross_attention.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.cross_attention.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.cross_attention.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.cross_attention_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.cross_attention_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.ff_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.ff_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.ffn.experts.expert_0.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.ffn.experts.expert_0.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.ffn.experts.expert_0.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.ffn.experts.expert_0.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.ffn.experts.expert_1.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.ffn.experts.expert_1.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.ffn.experts.expert_1.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.ffn.experts.expert_1.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.ffn.router.classifier.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.15.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.16.cross_attention.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.16.cross_attention.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.16.cross_attention.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.16.cross_attention.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.16.cross_attention.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.16.cross_attention.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.16.cross_attention.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.16.cross_attention.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.16.cross_attention_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.16.cross_attention_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.16.ff_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.16.ff_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.16.ffn.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.16.ffn.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.16.ffn.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.16.ffn.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.16.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.16.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.16.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.16.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.16.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.16.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.16.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.16.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.16.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.16.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.17.cross_attention.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.17.cross_attention.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.17.cross_attention.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.17.cross_attention.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.17.cross_attention.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.17.cross_attention.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.17.cross_attention.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.17.cross_attention.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.17.cross_attention_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.17.cross_attention_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.17.ff_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.17.ff_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.17.ffn.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.17.ffn.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.17.ffn.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.17.ffn.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.17.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.17.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.17.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.17.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.17.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.17.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.17.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.17.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.17.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.17.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.18.cross_attention.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.18.cross_attention.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.18.cross_attention.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.18.cross_attention.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.18.cross_attention.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.18.cross_attention.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.18.cross_attention.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.18.cross_attention.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.18.cross_attention_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.18.cross_attention_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.18.ff_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.18.ff_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.18.ffn.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.18.ffn.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.18.ffn.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.18.ffn.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.18.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.18.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.18.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.18.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.18.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.18.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.18.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.18.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.18.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.18.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.cross_attention.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.cross_attention.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.cross_attention.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.cross_attention.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.cross_attention.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.cross_attention.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.cross_attention.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.cross_attention.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.cross_attention_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.cross_attention_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.ff_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.ff_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.ffn.experts.expert_0.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.ffn.experts.expert_0.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.ffn.experts.expert_0.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.ffn.experts.expert_0.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.ffn.experts.expert_1.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.ffn.experts.expert_1.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.ffn.experts.expert_1.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.ffn.experts.expert_1.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.ffn.router.classifier.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.19.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.2.cross_attention.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.2.cross_attention.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.2.cross_attention.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.2.cross_attention.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.2.cross_attention.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.2.cross_attention.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.2.cross_attention.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.2.cross_attention.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.2.cross_attention_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.2.cross_attention_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.2.ff_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.2.ff_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.2.ffn.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.2.ffn.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.2.ffn.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.2.ffn.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.2.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.2.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.2.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.2.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.2.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.2.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.2.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.2.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.2.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.2.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.20.cross_attention.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.20.cross_attention.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.20.cross_attention.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.20.cross_attention.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.20.cross_attention.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.20.cross_attention.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.20.cross_attention.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.20.cross_attention.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.20.cross_attention_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.20.cross_attention_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.20.ff_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.20.ff_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.20.ffn.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.20.ffn.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.20.ffn.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.20.ffn.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.20.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.20.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.20.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.20.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.20.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.20.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.20.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.20.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.20.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.20.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.21.cross_attention.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.21.cross_attention.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.21.cross_attention.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.21.cross_attention.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.21.cross_attention.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.21.cross_attention.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.21.cross_attention.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.21.cross_attention.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.21.cross_attention_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.21.cross_attention_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.21.ff_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.21.ff_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.21.ffn.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.21.ffn.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.21.ffn.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.21.ffn.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.21.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.21.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.21.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.21.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.21.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.21.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.21.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.21.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.21.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.21.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.22.cross_attention.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.22.cross_attention.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.22.cross_attention.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.22.cross_attention.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.22.cross_attention.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.22.cross_attention.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.22.cross_attention.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.22.cross_attention.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.22.cross_attention_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.22.cross_attention_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.22.ff_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.22.ff_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.22.ffn.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.22.ffn.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.22.ffn.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.22.ffn.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.22.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.22.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.22.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.22.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.22.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.22.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.22.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.22.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.22.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.22.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.cross_attention.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.cross_attention.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.cross_attention.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.cross_attention.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.cross_attention.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.cross_attention.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.cross_attention.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.cross_attention.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.cross_attention_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.cross_attention_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.ff_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.ff_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.ffn.experts.expert_0.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.ffn.experts.expert_0.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.ffn.experts.expert_0.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.ffn.experts.expert_0.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.ffn.experts.expert_1.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.ffn.experts.expert_1.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.ffn.experts.expert_1.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.ffn.experts.expert_1.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.ffn.router.classifier.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.23.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.cross_attention.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.cross_attention.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.cross_attention.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.cross_attention.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.cross_attention.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.cross_attention.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.cross_attention.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.cross_attention.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.cross_attention_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.cross_attention_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.ff_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.ff_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.ffn.experts.expert_0.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.ffn.experts.expert_0.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.ffn.experts.expert_0.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.ffn.experts.expert_0.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.ffn.experts.expert_1.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.ffn.experts.expert_1.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.ffn.experts.expert_1.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.ffn.experts.expert_1.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.ffn.router.classifier.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.3.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.4.cross_attention.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.4.cross_attention.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.4.cross_attention.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.4.cross_attention.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.4.cross_attention.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.4.cross_attention.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.4.cross_attention.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.4.cross_attention.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.4.cross_attention_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.4.cross_attention_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.4.ff_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.4.ff_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.4.ffn.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.4.ffn.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.4.ffn.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.4.ffn.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.4.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.4.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.4.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.4.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.4.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.4.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.4.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.4.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.4.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.4.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.5.cross_attention.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.5.cross_attention.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.5.cross_attention.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.5.cross_attention.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.5.cross_attention.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.5.cross_attention.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.5.cross_attention.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.5.cross_attention.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.5.cross_attention_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.5.cross_attention_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.5.ff_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.5.ff_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.5.ffn.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.5.ffn.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.5.ffn.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.5.ffn.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.5.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.5.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.5.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.5.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.5.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.5.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.5.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.5.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.5.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.5.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.6.cross_attention.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.6.cross_attention.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.6.cross_attention.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.6.cross_attention.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.6.cross_attention.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.6.cross_attention.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.6.cross_attention.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.6.cross_attention.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.6.cross_attention_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.6.cross_attention_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.6.ff_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.6.ff_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.6.ffn.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.6.ffn.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.6.ffn.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.6.ffn.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.6.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.6.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.6.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.6.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.6.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.6.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.6.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.6.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.6.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.6.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.cross_attention.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.cross_attention.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.cross_attention.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.cross_attention.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.cross_attention.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.cross_attention.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.cross_attention.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.cross_attention.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.cross_attention_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.cross_attention_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.ff_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.ff_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.ffn.experts.expert_0.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.ffn.experts.expert_0.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.ffn.experts.expert_0.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.ffn.experts.expert_0.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.ffn.experts.expert_1.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.ffn.experts.expert_1.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.ffn.experts.expert_1.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.ffn.experts.expert_1.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.ffn.router.classifier.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.7.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.8.cross_attention.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.8.cross_attention.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.8.cross_attention.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.8.cross_attention.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.8.cross_attention.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.8.cross_attention.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.8.cross_attention.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.8.cross_attention.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.8.cross_attention_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.8.cross_attention_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.8.ff_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.8.ff_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.8.ffn.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.8.ffn.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.8.ffn.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.8.ffn.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.8.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.8.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.8.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.8.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.8.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.8.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.8.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.8.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.8.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.8.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.9.cross_attention.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.9.cross_attention.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.9.cross_attention.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.9.cross_attention.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.9.cross_attention.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.9.cross_attention.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.9.cross_attention.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.9.cross_attention.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.9.cross_attention_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.9.cross_attention_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.9.ff_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.9.ff_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.9.ffn.fc1.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.9.ffn.fc1.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.9.ffn.fc2.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.9.ffn.fc2.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.9.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.9.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.9.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.9.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.9.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.9.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.9.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.9.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
"decoder.layers.9.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
"decoder.layers.9.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
"encoder.embed_positions.weights": "pytorch_model-00001-of-00002.bin",
"encoder.embed_tokens.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.0.ff_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.0.ff_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.0.ffn.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.0.ffn.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.0.ffn.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.0.ffn.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.0.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.0.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.0.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.0.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.0.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.0.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.0.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.0.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.0.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.1.ff_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.1.ff_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.1.ffn.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.1.ffn.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.1.ffn.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.1.ffn.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.1.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.1.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.1.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.1.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.1.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.1.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.1.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.1.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.1.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.1.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.10.ff_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.10.ff_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.10.ffn.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.10.ffn.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.10.ffn.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.10.ffn.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.10.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.10.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.10.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.10.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.10.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.10.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.10.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.10.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.10.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.10.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.11.ff_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.11.ff_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.11.ffn.experts.expert_0.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.11.ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.11.ffn.experts.expert_0.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.11.ffn.experts.expert_0.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.11.ffn.experts.expert_1.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.11.ffn.experts.expert_1.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.11.ffn.experts.expert_1.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.11.ffn.experts.expert_1.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.11.ffn.router.classifier.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.11.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.11.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.11.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.11.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.11.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.11.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.11.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.11.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.11.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.11.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.12.ff_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.12.ff_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.12.ffn.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.12.ffn.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.12.ffn.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.12.ffn.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.12.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.12.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.12.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.12.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.12.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.12.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.12.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.12.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.12.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.12.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.13.ff_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.13.ff_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.13.ffn.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.13.ffn.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.13.ffn.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.13.ffn.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.13.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.13.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.13.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.13.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.13.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.13.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.13.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.13.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.13.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.13.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.14.ff_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.14.ff_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.14.ffn.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.14.ffn.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.14.ffn.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.14.ffn.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.14.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.14.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.14.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.14.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.14.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.14.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.14.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.14.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.14.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.14.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.15.ff_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.15.ff_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.15.ffn.experts.expert_0.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.15.ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.15.ffn.experts.expert_0.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.15.ffn.experts.expert_0.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.15.ffn.experts.expert_1.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.15.ffn.experts.expert_1.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.15.ffn.experts.expert_1.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.15.ffn.experts.expert_1.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.15.ffn.router.classifier.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.15.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.15.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.15.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.15.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.15.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.15.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.15.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.15.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.15.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.15.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.16.ff_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.16.ff_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.16.ffn.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.16.ffn.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.16.ffn.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.16.ffn.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.16.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.16.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.16.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.16.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.16.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.16.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.16.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.16.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.16.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.16.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.17.ff_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.17.ff_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.17.ffn.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.17.ffn.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.17.ffn.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.17.ffn.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.17.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.17.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.17.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.17.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.17.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.17.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.17.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.17.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.17.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.17.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.18.ff_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.18.ff_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.18.ffn.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.18.ffn.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.18.ffn.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.18.ffn.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.18.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.18.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.18.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.18.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.18.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.18.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.18.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.18.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.18.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.18.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.19.ff_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.19.ff_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.19.ffn.experts.expert_0.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.19.ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.19.ffn.experts.expert_0.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.19.ffn.experts.expert_0.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.19.ffn.experts.expert_1.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.19.ffn.experts.expert_1.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.19.ffn.experts.expert_1.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.19.ffn.experts.expert_1.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.19.ffn.router.classifier.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.19.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.19.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.19.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.19.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.19.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.19.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.19.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.19.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.19.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.19.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.2.ff_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.2.ff_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.2.ffn.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.2.ffn.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.2.ffn.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.2.ffn.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.2.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.2.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.2.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.2.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.2.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.2.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.2.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.2.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.2.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.2.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.20.ff_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.20.ff_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.20.ffn.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.20.ffn.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.20.ffn.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.20.ffn.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.20.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.20.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.20.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.20.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.20.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.20.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.20.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.20.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.20.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.20.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.21.ff_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.21.ff_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.21.ffn.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.21.ffn.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.21.ffn.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.21.ffn.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.21.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.21.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.21.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.21.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.21.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.21.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.21.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.21.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.21.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.21.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.22.ff_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.22.ff_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.22.ffn.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.22.ffn.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.22.ffn.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.22.ffn.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.22.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.22.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.22.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.22.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.22.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.22.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.22.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.22.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.22.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.22.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.23.ff_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.23.ff_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.23.ffn.experts.expert_0.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.23.ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.23.ffn.experts.expert_0.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.23.ffn.experts.expert_0.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.23.ffn.experts.expert_1.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.23.ffn.experts.expert_1.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.23.ffn.experts.expert_1.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.23.ffn.experts.expert_1.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.23.ffn.router.classifier.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.23.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.23.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.23.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.23.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.23.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.23.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.23.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.23.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.23.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.23.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.3.ff_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.3.ff_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.3.ffn.experts.expert_0.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.3.ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.3.ffn.experts.expert_0.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.3.ffn.experts.expert_0.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.3.ffn.experts.expert_1.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.3.ffn.experts.expert_1.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.3.ffn.experts.expert_1.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.3.ffn.experts.expert_1.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.3.ffn.router.classifier.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.3.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.3.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.3.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.3.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.3.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.3.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.3.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.3.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.3.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.3.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.4.ff_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.4.ff_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.4.ffn.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.4.ffn.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.4.ffn.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.4.ffn.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.4.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.4.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.4.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.4.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.4.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.4.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.4.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.4.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.4.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.4.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.5.ff_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.5.ff_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.5.ffn.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.5.ffn.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.5.ffn.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.5.ffn.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.5.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.5.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.5.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.5.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.5.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.5.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.5.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.5.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.5.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.5.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.6.ff_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.6.ff_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.6.ffn.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.6.ffn.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.6.ffn.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.6.ffn.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.6.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.6.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.6.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.6.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.6.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.6.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.6.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.6.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.6.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.6.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.7.ff_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.7.ff_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.7.ffn.experts.expert_0.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.7.ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.7.ffn.experts.expert_0.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.7.ffn.experts.expert_0.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.7.ffn.experts.expert_1.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.7.ffn.experts.expert_1.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.7.ffn.experts.expert_1.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.7.ffn.experts.expert_1.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.7.ffn.router.classifier.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.7.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.7.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.7.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.7.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.7.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.7.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.7.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.7.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.7.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.7.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.8.ff_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.8.ff_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.8.ffn.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.8.ffn.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.8.ffn.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.8.ffn.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.8.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.8.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.8.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.8.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.8.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.8.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.8.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.8.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.8.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.8.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.9.ff_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.9.ff_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.9.ffn.fc1.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.9.ffn.fc1.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.9.ffn.fc2.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.9.ffn.fc2.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.9.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.9.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.9.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.9.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.9.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.9.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.9.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.9.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
"encoder.layers.9.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
"encoder.layers.9.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
"shared.weight": "pytorch_model-00001-of-00002.bin"
}
}