BlackSamorez committed
Commit 2f1b2e8
1 Parent(s): f79aea0

index and config fixes

Files changed (2)
  1. config.json +4 -3
  2. pytorch_model.bin.index.json +1 -0
config.json CHANGED
@@ -1,8 +1,7 @@
 {
   "architectures": [
-    "RWForCausalLM"
+    "YalmCausalLM"
   ],
-  "attention_dropout": 0.0,
   "auto_map": {
     "AutoConfig": "configuration_yalm.YalmConfig",
     "AutoModel": "modelling_yalm.YalmModel",
@@ -17,11 +16,13 @@
   "num_attention_heads": 128,
   "scale_attn_by_inverse_layer_idx": true,
   "activation_type": "geglu",
-  "model_type": "YaLM",
+  "model_type": "yalm",
   "max_position_embeddings": 1024,
   "apply_residual_connection_post_layernorm": false,
   "initializer_range": 0.02,
   "layernorm_epsilon": 1e-5,
+  "attention_dropout": 0.1,
+  "hidden_dropout": 0.1,
   "torch_dtype": "float16",
   "transformers_version": "4.32.1"
 }
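With "auto_map" pointing at the repo's configuration_yalm.py / modelling_yalm.py and "model_type" lowercased to "yalm", the checkpoint is meant to be loaded through the Transformers auto classes with trust_remote_code=True. A minimal sketch of that loading path, assuming a placeholder repo id that is not part of this commit:

```python
# Sketch only: the repo id below is a hypothetical placeholder.
from transformers import AutoConfig, AutoModel

repo = "org/yalm-repo"  # placeholder, substitute the actual model repo

# trust_remote_code=True lets transformers import configuration_yalm.py and
# modelling_yalm.py from the repo, as registered under "auto_map" in config.json.
config = AutoConfig.from_pretrained(repo, trust_remote_code=True)
assert config.model_type == "yalm"        # lowercased by this commit
assert config.attention_dropout == 0.1    # added by this commit
assert config.hidden_dropout == 0.1       # added by this commit

model = AutoModel.from_pretrained(repo, trust_remote_code=True, torch_dtype="auto")
```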
pytorch_model.bin.index.json ADDED
@@ -0,0 +1 @@
+ {"metadata": {}, "weight_map": {"yalm.embed_tokens.weight": "pytorch_model-00-of-84.pt", "yalm.projector.input_layernorm.weight": "pytorch_model-01-of-84.pt", "yalm.projector.input_layernorm.bias": "pytorch_model-01-of-84.pt", "yalm.transformer.layers.0.attention.query_key_value.weight": "pytorch_model-03-of-84.pt", "yalm.transformer.layers.0.attention.dense.weight": "pytorch_model-03-of-84.pt", "yalm.transformer.layers.0.mlp.dense_ffn_hidden.weight": "pytorch_model-03-of-84.pt", "yalm.transformer.layers.0.mlp.dense_ffn_gate.weight": "pytorch_model-03-of-84.pt", "yalm.transformer.layers.0.mlp.dense_ffn_output.weight": "pytorch_model-03-of-84.pt", "yalm.transformer.layers.0.attention.query_key_value.bias": "pytorch_model-03-of-84.pt", "yalm.transformer.layers.0.attention.dense.bias": "pytorch_model-03-of-84.pt", "yalm.transformer.layers.0.post_attention_layernorm.weight": "pytorch_model-03-of-84.pt", "yalm.transformer.layers.0.post_attention_layernorm.bias": "pytorch_model-03-of-84.pt", "yalm.transformer.layers.0.mlp.dense_ffn_hidden.bias": "pytorch_model-03-of-84.pt", "yalm.transformer.layers.0.mlp.dense_ffn_gate.bias": "pytorch_model-03-of-84.pt", "yalm.transformer.layers.0.mlp.dense_ffn_output.bias": "pytorch_model-03-of-84.pt", "yalm.transformer.layers.1.attention.query_key_value.weight": "pytorch_model-04-of-84.pt", "yalm.transformer.layers.1.attention.dense.weight": "pytorch_model-04-of-84.pt", "yalm.transformer.layers.1.mlp.dense_ffn_hidden.weight": "pytorch_model-04-of-84.pt", "yalm.transformer.layers.1.mlp.dense_ffn_gate.weight": "pytorch_model-04-of-84.pt", "yalm.transformer.layers.1.mlp.dense_ffn_output.weight": "pytorch_model-04-of-84.pt", "yalm.transformer.layers.1.attention.query_key_value.bias": "pytorch_model-04-of-84.pt", "yalm.transformer.layers.1.attention.dense.bias": "pytorch_model-04-of-84.pt", "yalm.transformer.layers.1.post_attention_layernorm.weight": "pytorch_model-04-of-84.pt", "yalm.transformer.layers.1.post_attention_layernorm.bias": "pytorch_model-04-of-84.pt", "yalm.transformer.layers.1.mlp.dense_ffn_hidden.bias": "pytorch_model-04-of-84.pt", "yalm.transformer.layers.1.mlp.dense_ffn_gate.bias": "pytorch_model-04-of-84.pt", "yalm.transformer.layers.1.mlp.dense_ffn_output.bias": "pytorch_model-04-of-84.pt", "yalm.transformer.layers.1.input_layernorm.weight": "pytorch_model-04-of-84.pt", "yalm.transformer.layers.1.input_layernorm.bias": "pytorch_model-04-of-84.pt", "yalm.transformer.layers.2.attention.query_key_value.weight": "pytorch_model-05-of-84.pt", "yalm.transformer.layers.2.attention.dense.weight": "pytorch_model-05-of-84.pt", "yalm.transformer.layers.2.mlp.dense_ffn_hidden.weight": "pytorch_model-05-of-84.pt", "yalm.transformer.layers.2.mlp.dense_ffn_gate.weight": "pytorch_model-05-of-84.pt", "yalm.transformer.layers.2.mlp.dense_ffn_output.weight": "pytorch_model-05-of-84.pt", "yalm.transformer.layers.2.attention.query_key_value.bias": "pytorch_model-05-of-84.pt", "yalm.transformer.layers.2.attention.dense.bias": "pytorch_model-05-of-84.pt", "yalm.transformer.layers.2.post_attention_layernorm.weight": "pytorch_model-05-of-84.pt", "yalm.transformer.layers.2.post_attention_layernorm.bias": "pytorch_model-05-of-84.pt", "yalm.transformer.layers.2.mlp.dense_ffn_hidden.bias": "pytorch_model-05-of-84.pt", "yalm.transformer.layers.2.mlp.dense_ffn_gate.bias": "pytorch_model-05-of-84.pt", "yalm.transformer.layers.2.mlp.dense_ffn_output.bias": "pytorch_model-05-of-84.pt", "yalm.transformer.layers.2.input_layernorm.weight": "pytorch_model-05-of-84.pt", 
"yalm.transformer.layers.2.input_layernorm.bias": "pytorch_model-05-of-84.pt", "yalm.transformer.layers.3.attention.query_key_value.weight": "pytorch_model-06-of-84.pt", "yalm.transformer.layers.3.attention.dense.weight": "pytorch_model-06-of-84.pt", "yalm.transformer.layers.3.mlp.dense_ffn_hidden.weight": "pytorch_model-06-of-84.pt", "yalm.transformer.layers.3.mlp.dense_ffn_gate.weight": "pytorch_model-06-of-84.pt", "yalm.transformer.layers.3.mlp.dense_ffn_output.weight": "pytorch_model-06-of-84.pt", "yalm.transformer.layers.3.attention.query_key_value.bias": "pytorch_model-06-of-84.pt", "yalm.transformer.layers.3.attention.dense.bias": "pytorch_model-06-of-84.pt", "yalm.transformer.layers.3.post_attention_layernorm.weight": "pytorch_model-06-of-84.pt", "yalm.transformer.layers.3.post_attention_layernorm.bias": "pytorch_model-06-of-84.pt", "yalm.transformer.layers.3.mlp.dense_ffn_hidden.bias": "pytorch_model-06-of-84.pt", "yalm.transformer.layers.3.mlp.dense_ffn_gate.bias": "pytorch_model-06-of-84.pt", "yalm.transformer.layers.3.mlp.dense_ffn_output.bias": "pytorch_model-06-of-84.pt", "yalm.transformer.layers.3.input_layernorm.weight": "pytorch_model-06-of-84.pt", "yalm.transformer.layers.3.input_layernorm.bias": "pytorch_model-06-of-84.pt", "yalm.transformer.layers.4.attention.query_key_value.weight": "pytorch_model-07-of-84.pt", "yalm.transformer.layers.4.attention.dense.weight": "pytorch_model-07-of-84.pt", "yalm.transformer.layers.4.mlp.dense_ffn_hidden.weight": "pytorch_model-07-of-84.pt", "yalm.transformer.layers.4.mlp.dense_ffn_gate.weight": "pytorch_model-07-of-84.pt", "yalm.transformer.layers.4.mlp.dense_ffn_output.weight": "pytorch_model-07-of-84.pt", "yalm.transformer.layers.4.attention.query_key_value.bias": "pytorch_model-07-of-84.pt", "yalm.transformer.layers.4.attention.dense.bias": "pytorch_model-07-of-84.pt", "yalm.transformer.layers.4.post_attention_layernorm.weight": "pytorch_model-07-of-84.pt", "yalm.transformer.layers.4.post_attention_layernorm.bias": "pytorch_model-07-of-84.pt", "yalm.transformer.layers.4.mlp.dense_ffn_hidden.bias": "pytorch_model-07-of-84.pt", "yalm.transformer.layers.4.mlp.dense_ffn_gate.bias": "pytorch_model-07-of-84.pt", "yalm.transformer.layers.4.mlp.dense_ffn_output.bias": "pytorch_model-07-of-84.pt", "yalm.transformer.layers.4.input_layernorm.weight": "pytorch_model-07-of-84.pt", "yalm.transformer.layers.4.input_layernorm.bias": "pytorch_model-07-of-84.pt", "yalm.transformer.layers.5.attention.query_key_value.weight": "pytorch_model-08-of-84.pt", "yalm.transformer.layers.5.attention.dense.weight": "pytorch_model-08-of-84.pt", "yalm.transformer.layers.5.mlp.dense_ffn_hidden.weight": "pytorch_model-08-of-84.pt", "yalm.transformer.layers.5.mlp.dense_ffn_gate.weight": "pytorch_model-08-of-84.pt", "yalm.transformer.layers.5.mlp.dense_ffn_output.weight": "pytorch_model-08-of-84.pt", "yalm.transformer.layers.5.attention.query_key_value.bias": "pytorch_model-08-of-84.pt", "yalm.transformer.layers.5.attention.dense.bias": "pytorch_model-08-of-84.pt", "yalm.transformer.layers.5.post_attention_layernorm.weight": "pytorch_model-08-of-84.pt", "yalm.transformer.layers.5.post_attention_layernorm.bias": "pytorch_model-08-of-84.pt", "yalm.transformer.layers.5.mlp.dense_ffn_hidden.bias": "pytorch_model-08-of-84.pt", "yalm.transformer.layers.5.mlp.dense_ffn_gate.bias": "pytorch_model-08-of-84.pt", "yalm.transformer.layers.5.mlp.dense_ffn_output.bias": "pytorch_model-08-of-84.pt", "yalm.transformer.layers.5.input_layernorm.weight": "pytorch_model-08-of-84.pt", 
"yalm.transformer.layers.5.input_layernorm.bias": "pytorch_model-08-of-84.pt", "yalm.transformer.layers.6.attention.query_key_value.weight": "pytorch_model-09-of-84.pt", "yalm.transformer.layers.6.attention.dense.weight": "pytorch_model-09-of-84.pt", "yalm.transformer.layers.6.mlp.dense_ffn_hidden.weight": "pytorch_model-09-of-84.pt", "yalm.transformer.layers.6.mlp.dense_ffn_gate.weight": "pytorch_model-09-of-84.pt", "yalm.transformer.layers.6.mlp.dense_ffn_output.weight": "pytorch_model-09-of-84.pt", "yalm.transformer.layers.6.attention.query_key_value.bias": "pytorch_model-09-of-84.pt", "yalm.transformer.layers.6.attention.dense.bias": "pytorch_model-09-of-84.pt", "yalm.transformer.layers.6.post_attention_layernorm.weight": "pytorch_model-09-of-84.pt", "yalm.transformer.layers.6.post_attention_layernorm.bias": "pytorch_model-09-of-84.pt", "yalm.transformer.layers.6.mlp.dense_ffn_hidden.bias": "pytorch_model-09-of-84.pt", "yalm.transformer.layers.6.mlp.dense_ffn_gate.bias": "pytorch_model-09-of-84.pt", "yalm.transformer.layers.6.mlp.dense_ffn_output.bias": "pytorch_model-09-of-84.pt", "yalm.transformer.layers.6.input_layernorm.weight": "pytorch_model-09-of-84.pt", "yalm.transformer.layers.6.input_layernorm.bias": "pytorch_model-09-of-84.pt", "yalm.transformer.layers.7.attention.query_key_value.weight": "pytorch_model-10-of-84.pt", "yalm.transformer.layers.7.attention.dense.weight": "pytorch_model-10-of-84.pt", "yalm.transformer.layers.7.mlp.dense_ffn_hidden.weight": "pytorch_model-10-of-84.pt", "yalm.transformer.layers.7.mlp.dense_ffn_gate.weight": "pytorch_model-10-of-84.pt", "yalm.transformer.layers.7.mlp.dense_ffn_output.weight": "pytorch_model-10-of-84.pt", "yalm.transformer.layers.7.attention.query_key_value.bias": "pytorch_model-10-of-84.pt", "yalm.transformer.layers.7.attention.dense.bias": "pytorch_model-10-of-84.pt", "yalm.transformer.layers.7.post_attention_layernorm.weight": "pytorch_model-10-of-84.pt", "yalm.transformer.layers.7.post_attention_layernorm.bias": "pytorch_model-10-of-84.pt", "yalm.transformer.layers.7.mlp.dense_ffn_hidden.bias": "pytorch_model-10-of-84.pt", "yalm.transformer.layers.7.mlp.dense_ffn_gate.bias": "pytorch_model-10-of-84.pt", "yalm.transformer.layers.7.mlp.dense_ffn_output.bias": "pytorch_model-10-of-84.pt", "yalm.transformer.layers.7.input_layernorm.weight": "pytorch_model-10-of-84.pt", "yalm.transformer.layers.7.input_layernorm.bias": "pytorch_model-10-of-84.pt", "yalm.transformer.layers.8.attention.query_key_value.weight": "pytorch_model-11-of-84.pt", "yalm.transformer.layers.8.attention.dense.weight": "pytorch_model-11-of-84.pt", "yalm.transformer.layers.8.mlp.dense_ffn_hidden.weight": "pytorch_model-11-of-84.pt", "yalm.transformer.layers.8.mlp.dense_ffn_gate.weight": "pytorch_model-11-of-84.pt", "yalm.transformer.layers.8.mlp.dense_ffn_output.weight": "pytorch_model-11-of-84.pt", "yalm.transformer.layers.8.attention.query_key_value.bias": "pytorch_model-11-of-84.pt", "yalm.transformer.layers.8.attention.dense.bias": "pytorch_model-11-of-84.pt", "yalm.transformer.layers.8.post_attention_layernorm.weight": "pytorch_model-11-of-84.pt", "yalm.transformer.layers.8.post_attention_layernorm.bias": "pytorch_model-11-of-84.pt", "yalm.transformer.layers.8.mlp.dense_ffn_hidden.bias": "pytorch_model-11-of-84.pt", "yalm.transformer.layers.8.mlp.dense_ffn_gate.bias": "pytorch_model-11-of-84.pt", "yalm.transformer.layers.8.mlp.dense_ffn_output.bias": "pytorch_model-11-of-84.pt", "yalm.transformer.layers.8.input_layernorm.weight": "pytorch_model-11-of-84.pt", 
"yalm.transformer.layers.8.input_layernorm.bias": "pytorch_model-11-of-84.pt", "yalm.transformer.layers.9.attention.query_key_value.weight": "pytorch_model-12-of-84.pt", "yalm.transformer.layers.9.attention.dense.weight": "pytorch_model-12-of-84.pt", "yalm.transformer.layers.9.mlp.dense_ffn_hidden.weight": "pytorch_model-12-of-84.pt", "yalm.transformer.layers.9.mlp.dense_ffn_gate.weight": "pytorch_model-12-of-84.pt", "yalm.transformer.layers.9.mlp.dense_ffn_output.weight": "pytorch_model-12-of-84.pt", "yalm.transformer.layers.9.attention.query_key_value.bias": "pytorch_model-12-of-84.pt", "yalm.transformer.layers.9.attention.dense.bias": "pytorch_model-12-of-84.pt", "yalm.transformer.layers.9.post_attention_layernorm.weight": "pytorch_model-12-of-84.pt", "yalm.transformer.layers.9.post_attention_layernorm.bias": "pytorch_model-12-of-84.pt", "yalm.transformer.layers.9.mlp.dense_ffn_hidden.bias": "pytorch_model-12-of-84.pt", "yalm.transformer.layers.9.mlp.dense_ffn_gate.bias": "pytorch_model-12-of-84.pt", "yalm.transformer.layers.9.mlp.dense_ffn_output.bias": "pytorch_model-12-of-84.pt", "yalm.transformer.layers.9.input_layernorm.weight": "pytorch_model-12-of-84.pt", "yalm.transformer.layers.9.input_layernorm.bias": "pytorch_model-12-of-84.pt", "yalm.transformer.layers.10.attention.query_key_value.weight": "pytorch_model-13-of-84.pt", "yalm.transformer.layers.10.attention.dense.weight": "pytorch_model-13-of-84.pt", "yalm.transformer.layers.10.mlp.dense_ffn_hidden.weight": "pytorch_model-13-of-84.pt", "yalm.transformer.layers.10.mlp.dense_ffn_gate.weight": "pytorch_model-13-of-84.pt", "yalm.transformer.layers.10.mlp.dense_ffn_output.weight": "pytorch_model-13-of-84.pt", "yalm.transformer.layers.10.attention.query_key_value.bias": "pytorch_model-13-of-84.pt", "yalm.transformer.layers.10.attention.dense.bias": "pytorch_model-13-of-84.pt", "yalm.transformer.layers.10.post_attention_layernorm.weight": "pytorch_model-13-of-84.pt", "yalm.transformer.layers.10.post_attention_layernorm.bias": "pytorch_model-13-of-84.pt", "yalm.transformer.layers.10.mlp.dense_ffn_hidden.bias": "pytorch_model-13-of-84.pt", "yalm.transformer.layers.10.mlp.dense_ffn_gate.bias": "pytorch_model-13-of-84.pt", "yalm.transformer.layers.10.mlp.dense_ffn_output.bias": "pytorch_model-13-of-84.pt", "yalm.transformer.layers.10.input_layernorm.weight": "pytorch_model-13-of-84.pt", "yalm.transformer.layers.10.input_layernorm.bias": "pytorch_model-13-of-84.pt", "yalm.transformer.layers.11.attention.query_key_value.weight": "pytorch_model-14-of-84.pt", "yalm.transformer.layers.11.attention.dense.weight": "pytorch_model-14-of-84.pt", "yalm.transformer.layers.11.mlp.dense_ffn_hidden.weight": "pytorch_model-14-of-84.pt", "yalm.transformer.layers.11.mlp.dense_ffn_gate.weight": "pytorch_model-14-of-84.pt", "yalm.transformer.layers.11.mlp.dense_ffn_output.weight": "pytorch_model-14-of-84.pt", "yalm.transformer.layers.11.attention.query_key_value.bias": "pytorch_model-14-of-84.pt", "yalm.transformer.layers.11.attention.dense.bias": "pytorch_model-14-of-84.pt", "yalm.transformer.layers.11.post_attention_layernorm.weight": "pytorch_model-14-of-84.pt", "yalm.transformer.layers.11.post_attention_layernorm.bias": "pytorch_model-14-of-84.pt", "yalm.transformer.layers.11.mlp.dense_ffn_hidden.bias": "pytorch_model-14-of-84.pt", "yalm.transformer.layers.11.mlp.dense_ffn_gate.bias": "pytorch_model-14-of-84.pt", "yalm.transformer.layers.11.mlp.dense_ffn_output.bias": "pytorch_model-14-of-84.pt", "yalm.transformer.layers.11.input_layernorm.weight": 
"pytorch_model-14-of-84.pt", "yalm.transformer.layers.11.input_layernorm.bias": "pytorch_model-14-of-84.pt", "yalm.transformer.layers.12.attention.query_key_value.weight": "pytorch_model-15-of-84.pt", "yalm.transformer.layers.12.attention.dense.weight": "pytorch_model-15-of-84.pt", "yalm.transformer.layers.12.mlp.dense_ffn_hidden.weight": "pytorch_model-15-of-84.pt", "yalm.transformer.layers.12.mlp.dense_ffn_gate.weight": "pytorch_model-15-of-84.pt", "yalm.transformer.layers.12.mlp.dense_ffn_output.weight": "pytorch_model-15-of-84.pt", "yalm.transformer.layers.12.attention.query_key_value.bias": "pytorch_model-15-of-84.pt", "yalm.transformer.layers.12.attention.dense.bias": "pytorch_model-15-of-84.pt", "yalm.transformer.layers.12.post_attention_layernorm.weight": "pytorch_model-15-of-84.pt", "yalm.transformer.layers.12.post_attention_layernorm.bias": "pytorch_model-15-of-84.pt", "yalm.transformer.layers.12.mlp.dense_ffn_hidden.bias": "pytorch_model-15-of-84.pt", "yalm.transformer.layers.12.mlp.dense_ffn_gate.bias": "pytorch_model-15-of-84.pt", "yalm.transformer.layers.12.mlp.dense_ffn_output.bias": "pytorch_model-15-of-84.pt", "yalm.transformer.layers.12.input_layernorm.weight": "pytorch_model-15-of-84.pt", "yalm.transformer.layers.12.input_layernorm.bias": "pytorch_model-15-of-84.pt", "yalm.transformer.layers.13.attention.query_key_value.weight": "pytorch_model-16-of-84.pt", "yalm.transformer.layers.13.attention.dense.weight": "pytorch_model-16-of-84.pt", "yalm.transformer.layers.13.mlp.dense_ffn_hidden.weight": "pytorch_model-16-of-84.pt", "yalm.transformer.layers.13.mlp.dense_ffn_gate.weight": "pytorch_model-16-of-84.pt", "yalm.transformer.layers.13.mlp.dense_ffn_output.weight": "pytorch_model-16-of-84.pt", "yalm.transformer.layers.13.attention.query_key_value.bias": "pytorch_model-16-of-84.pt", "yalm.transformer.layers.13.attention.dense.bias": "pytorch_model-16-of-84.pt", "yalm.transformer.layers.13.post_attention_layernorm.weight": "pytorch_model-16-of-84.pt", "yalm.transformer.layers.13.post_attention_layernorm.bias": "pytorch_model-16-of-84.pt", "yalm.transformer.layers.13.mlp.dense_ffn_hidden.bias": "pytorch_model-16-of-84.pt", "yalm.transformer.layers.13.mlp.dense_ffn_gate.bias": "pytorch_model-16-of-84.pt", "yalm.transformer.layers.13.mlp.dense_ffn_output.bias": "pytorch_model-16-of-84.pt", "yalm.transformer.layers.13.input_layernorm.weight": "pytorch_model-16-of-84.pt", "yalm.transformer.layers.13.input_layernorm.bias": "pytorch_model-16-of-84.pt", "yalm.transformer.layers.14.attention.query_key_value.weight": "pytorch_model-17-of-84.pt", "yalm.transformer.layers.14.attention.dense.weight": "pytorch_model-17-of-84.pt", "yalm.transformer.layers.14.mlp.dense_ffn_hidden.weight": "pytorch_model-17-of-84.pt", "yalm.transformer.layers.14.mlp.dense_ffn_gate.weight": "pytorch_model-17-of-84.pt", "yalm.transformer.layers.14.mlp.dense_ffn_output.weight": "pytorch_model-17-of-84.pt", "yalm.transformer.layers.14.attention.query_key_value.bias": "pytorch_model-17-of-84.pt", "yalm.transformer.layers.14.attention.dense.bias": "pytorch_model-17-of-84.pt", "yalm.transformer.layers.14.post_attention_layernorm.weight": "pytorch_model-17-of-84.pt", "yalm.transformer.layers.14.post_attention_layernorm.bias": "pytorch_model-17-of-84.pt", "yalm.transformer.layers.14.mlp.dense_ffn_hidden.bias": "pytorch_model-17-of-84.pt", "yalm.transformer.layers.14.mlp.dense_ffn_gate.bias": "pytorch_model-17-of-84.pt", "yalm.transformer.layers.14.mlp.dense_ffn_output.bias": "pytorch_model-17-of-84.pt", 
"yalm.transformer.layers.14.input_layernorm.weight": "pytorch_model-17-of-84.pt", "yalm.transformer.layers.14.input_layernorm.bias": "pytorch_model-17-of-84.pt", "yalm.transformer.layers.15.attention.query_key_value.weight": "pytorch_model-18-of-84.pt", "yalm.transformer.layers.15.attention.dense.weight": "pytorch_model-18-of-84.pt", "yalm.transformer.layers.15.mlp.dense_ffn_hidden.weight": "pytorch_model-18-of-84.pt", "yalm.transformer.layers.15.mlp.dense_ffn_gate.weight": "pytorch_model-18-of-84.pt", "yalm.transformer.layers.15.mlp.dense_ffn_output.weight": "pytorch_model-18-of-84.pt", "yalm.transformer.layers.15.attention.query_key_value.bias": "pytorch_model-18-of-84.pt", "yalm.transformer.layers.15.attention.dense.bias": "pytorch_model-18-of-84.pt", "yalm.transformer.layers.15.post_attention_layernorm.weight": "pytorch_model-18-of-84.pt", "yalm.transformer.layers.15.post_attention_layernorm.bias": "pytorch_model-18-of-84.pt", "yalm.transformer.layers.15.mlp.dense_ffn_hidden.bias": "pytorch_model-18-of-84.pt", "yalm.transformer.layers.15.mlp.dense_ffn_gate.bias": "pytorch_model-18-of-84.pt", "yalm.transformer.layers.15.mlp.dense_ffn_output.bias": "pytorch_model-18-of-84.pt", "yalm.transformer.layers.15.input_layernorm.weight": "pytorch_model-18-of-84.pt", "yalm.transformer.layers.15.input_layernorm.bias": "pytorch_model-18-of-84.pt", "yalm.transformer.layers.16.attention.query_key_value.weight": "pytorch_model-19-of-84.pt", "yalm.transformer.layers.16.attention.dense.weight": "pytorch_model-19-of-84.pt", "yalm.transformer.layers.16.mlp.dense_ffn_hidden.weight": "pytorch_model-19-of-84.pt", "yalm.transformer.layers.16.mlp.dense_ffn_gate.weight": "pytorch_model-19-of-84.pt", "yalm.transformer.layers.16.mlp.dense_ffn_output.weight": "pytorch_model-19-of-84.pt", "yalm.transformer.layers.16.attention.query_key_value.bias": "pytorch_model-19-of-84.pt", "yalm.transformer.layers.16.attention.dense.bias": "pytorch_model-19-of-84.pt", "yalm.transformer.layers.16.post_attention_layernorm.weight": "pytorch_model-19-of-84.pt", "yalm.transformer.layers.16.post_attention_layernorm.bias": "pytorch_model-19-of-84.pt", "yalm.transformer.layers.16.mlp.dense_ffn_hidden.bias": "pytorch_model-19-of-84.pt", "yalm.transformer.layers.16.mlp.dense_ffn_gate.bias": "pytorch_model-19-of-84.pt", "yalm.transformer.layers.16.mlp.dense_ffn_output.bias": "pytorch_model-19-of-84.pt", "yalm.transformer.layers.16.input_layernorm.weight": "pytorch_model-19-of-84.pt", "yalm.transformer.layers.16.input_layernorm.bias": "pytorch_model-19-of-84.pt", "yalm.transformer.layers.17.attention.query_key_value.weight": "pytorch_model-20-of-84.pt", "yalm.transformer.layers.17.attention.dense.weight": "pytorch_model-20-of-84.pt", "yalm.transformer.layers.17.mlp.dense_ffn_hidden.weight": "pytorch_model-20-of-84.pt", "yalm.transformer.layers.17.mlp.dense_ffn_gate.weight": "pytorch_model-20-of-84.pt", "yalm.transformer.layers.17.mlp.dense_ffn_output.weight": "pytorch_model-20-of-84.pt", "yalm.transformer.layers.17.attention.query_key_value.bias": "pytorch_model-20-of-84.pt", "yalm.transformer.layers.17.attention.dense.bias": "pytorch_model-20-of-84.pt", "yalm.transformer.layers.17.post_attention_layernorm.weight": "pytorch_model-20-of-84.pt", "yalm.transformer.layers.17.post_attention_layernorm.bias": "pytorch_model-20-of-84.pt", "yalm.transformer.layers.17.mlp.dense_ffn_hidden.bias": "pytorch_model-20-of-84.pt", "yalm.transformer.layers.17.mlp.dense_ffn_gate.bias": "pytorch_model-20-of-84.pt", 
"yalm.transformer.layers.17.mlp.dense_ffn_output.bias": "pytorch_model-20-of-84.pt", "yalm.transformer.layers.17.input_layernorm.weight": "pytorch_model-20-of-84.pt", "yalm.transformer.layers.17.input_layernorm.bias": "pytorch_model-20-of-84.pt", "yalm.transformer.layers.18.attention.query_key_value.weight": "pytorch_model-21-of-84.pt", "yalm.transformer.layers.18.attention.dense.weight": "pytorch_model-21-of-84.pt", "yalm.transformer.layers.18.mlp.dense_ffn_hidden.weight": "pytorch_model-21-of-84.pt", "yalm.transformer.layers.18.mlp.dense_ffn_gate.weight": "pytorch_model-21-of-84.pt", "yalm.transformer.layers.18.mlp.dense_ffn_output.weight": "pytorch_model-21-of-84.pt", "yalm.transformer.layers.18.attention.query_key_value.bias": "pytorch_model-21-of-84.pt", "yalm.transformer.layers.18.attention.dense.bias": "pytorch_model-21-of-84.pt", "yalm.transformer.layers.18.post_attention_layernorm.weight": "pytorch_model-21-of-84.pt", "yalm.transformer.layers.18.post_attention_layernorm.bias": "pytorch_model-21-of-84.pt", "yalm.transformer.layers.18.mlp.dense_ffn_hidden.bias": "pytorch_model-21-of-84.pt", "yalm.transformer.layers.18.mlp.dense_ffn_gate.bias": "pytorch_model-21-of-84.pt", "yalm.transformer.layers.18.mlp.dense_ffn_output.bias": "pytorch_model-21-of-84.pt", "yalm.transformer.layers.18.input_layernorm.weight": "pytorch_model-21-of-84.pt", "yalm.transformer.layers.18.input_layernorm.bias": "pytorch_model-21-of-84.pt", "yalm.transformer.layers.19.attention.query_key_value.weight": "pytorch_model-22-of-84.pt", "yalm.transformer.layers.19.attention.dense.weight": "pytorch_model-22-of-84.pt", "yalm.transformer.layers.19.mlp.dense_ffn_hidden.weight": "pytorch_model-22-of-84.pt", "yalm.transformer.layers.19.mlp.dense_ffn_gate.weight": "pytorch_model-22-of-84.pt", "yalm.transformer.layers.19.mlp.dense_ffn_output.weight": "pytorch_model-22-of-84.pt", "yalm.transformer.layers.19.attention.query_key_value.bias": "pytorch_model-22-of-84.pt", "yalm.transformer.layers.19.attention.dense.bias": "pytorch_model-22-of-84.pt", "yalm.transformer.layers.19.post_attention_layernorm.weight": "pytorch_model-22-of-84.pt", "yalm.transformer.layers.19.post_attention_layernorm.bias": "pytorch_model-22-of-84.pt", "yalm.transformer.layers.19.mlp.dense_ffn_hidden.bias": "pytorch_model-22-of-84.pt", "yalm.transformer.layers.19.mlp.dense_ffn_gate.bias": "pytorch_model-22-of-84.pt", "yalm.transformer.layers.19.mlp.dense_ffn_output.bias": "pytorch_model-22-of-84.pt", "yalm.transformer.layers.19.input_layernorm.weight": "pytorch_model-22-of-84.pt", "yalm.transformer.layers.19.input_layernorm.bias": "pytorch_model-22-of-84.pt", "yalm.transformer.layers.20.attention.query_key_value.weight": "pytorch_model-23-of-84.pt", "yalm.transformer.layers.20.attention.dense.weight": "pytorch_model-23-of-84.pt", "yalm.transformer.layers.20.mlp.dense_ffn_hidden.weight": "pytorch_model-23-of-84.pt", "yalm.transformer.layers.20.mlp.dense_ffn_gate.weight": "pytorch_model-23-of-84.pt", "yalm.transformer.layers.20.mlp.dense_ffn_output.weight": "pytorch_model-23-of-84.pt", "yalm.transformer.layers.20.attention.query_key_value.bias": "pytorch_model-23-of-84.pt", "yalm.transformer.layers.20.attention.dense.bias": "pytorch_model-23-of-84.pt", "yalm.transformer.layers.20.post_attention_layernorm.weight": "pytorch_model-23-of-84.pt", "yalm.transformer.layers.20.post_attention_layernorm.bias": "pytorch_model-23-of-84.pt", "yalm.transformer.layers.20.mlp.dense_ffn_hidden.bias": "pytorch_model-23-of-84.pt", 
"yalm.transformer.layers.20.mlp.dense_ffn_gate.bias": "pytorch_model-23-of-84.pt", "yalm.transformer.layers.20.mlp.dense_ffn_output.bias": "pytorch_model-23-of-84.pt", "yalm.transformer.layers.20.input_layernorm.weight": "pytorch_model-23-of-84.pt", "yalm.transformer.layers.20.input_layernorm.bias": "pytorch_model-23-of-84.pt", "yalm.transformer.layers.21.attention.query_key_value.weight": "pytorch_model-24-of-84.pt", "yalm.transformer.layers.21.attention.dense.weight": "pytorch_model-24-of-84.pt", "yalm.transformer.layers.21.mlp.dense_ffn_hidden.weight": "pytorch_model-24-of-84.pt", "yalm.transformer.layers.21.mlp.dense_ffn_gate.weight": "pytorch_model-24-of-84.pt", "yalm.transformer.layers.21.mlp.dense_ffn_output.weight": "pytorch_model-24-of-84.pt", "yalm.transformer.layers.21.attention.query_key_value.bias": "pytorch_model-24-of-84.pt", "yalm.transformer.layers.21.attention.dense.bias": "pytorch_model-24-of-84.pt", "yalm.transformer.layers.21.post_attention_layernorm.weight": "pytorch_model-24-of-84.pt", "yalm.transformer.layers.21.post_attention_layernorm.bias": "pytorch_model-24-of-84.pt", "yalm.transformer.layers.21.mlp.dense_ffn_hidden.bias": "pytorch_model-24-of-84.pt", "yalm.transformer.layers.21.mlp.dense_ffn_gate.bias": "pytorch_model-24-of-84.pt", "yalm.transformer.layers.21.mlp.dense_ffn_output.bias": "pytorch_model-24-of-84.pt", "yalm.transformer.layers.21.input_layernorm.weight": "pytorch_model-24-of-84.pt", "yalm.transformer.layers.21.input_layernorm.bias": "pytorch_model-24-of-84.pt", "yalm.transformer.layers.22.attention.query_key_value.weight": "pytorch_model-25-of-84.pt", "yalm.transformer.layers.22.attention.dense.weight": "pytorch_model-25-of-84.pt", "yalm.transformer.layers.22.mlp.dense_ffn_hidden.weight": "pytorch_model-25-of-84.pt", "yalm.transformer.layers.22.mlp.dense_ffn_gate.weight": "pytorch_model-25-of-84.pt", "yalm.transformer.layers.22.mlp.dense_ffn_output.weight": "pytorch_model-25-of-84.pt", "yalm.transformer.layers.22.attention.query_key_value.bias": "pytorch_model-25-of-84.pt", "yalm.transformer.layers.22.attention.dense.bias": "pytorch_model-25-of-84.pt", "yalm.transformer.layers.22.post_attention_layernorm.weight": "pytorch_model-25-of-84.pt", "yalm.transformer.layers.22.post_attention_layernorm.bias": "pytorch_model-25-of-84.pt", "yalm.transformer.layers.22.mlp.dense_ffn_hidden.bias": "pytorch_model-25-of-84.pt", "yalm.transformer.layers.22.mlp.dense_ffn_gate.bias": "pytorch_model-25-of-84.pt", "yalm.transformer.layers.22.mlp.dense_ffn_output.bias": "pytorch_model-25-of-84.pt", "yalm.transformer.layers.22.input_layernorm.weight": "pytorch_model-25-of-84.pt", "yalm.transformer.layers.22.input_layernorm.bias": "pytorch_model-25-of-84.pt", "yalm.transformer.layers.23.attention.query_key_value.weight": "pytorch_model-26-of-84.pt", "yalm.transformer.layers.23.attention.dense.weight": "pytorch_model-26-of-84.pt", "yalm.transformer.layers.23.mlp.dense_ffn_hidden.weight": "pytorch_model-26-of-84.pt", "yalm.transformer.layers.23.mlp.dense_ffn_gate.weight": "pytorch_model-26-of-84.pt", "yalm.transformer.layers.23.mlp.dense_ffn_output.weight": "pytorch_model-26-of-84.pt", "yalm.transformer.layers.23.attention.query_key_value.bias": "pytorch_model-26-of-84.pt", "yalm.transformer.layers.23.attention.dense.bias": "pytorch_model-26-of-84.pt", "yalm.transformer.layers.23.post_attention_layernorm.weight": "pytorch_model-26-of-84.pt", "yalm.transformer.layers.23.post_attention_layernorm.bias": "pytorch_model-26-of-84.pt", 
"yalm.transformer.layers.23.mlp.dense_ffn_hidden.bias": "pytorch_model-26-of-84.pt", "yalm.transformer.layers.23.mlp.dense_ffn_gate.bias": "pytorch_model-26-of-84.pt", "yalm.transformer.layers.23.mlp.dense_ffn_output.bias": "pytorch_model-26-of-84.pt", "yalm.transformer.layers.23.input_layernorm.weight": "pytorch_model-26-of-84.pt", "yalm.transformer.layers.23.input_layernorm.bias": "pytorch_model-26-of-84.pt", "yalm.transformer.layers.24.attention.query_key_value.weight": "pytorch_model-27-of-84.pt", "yalm.transformer.layers.24.attention.dense.weight": "pytorch_model-27-of-84.pt", "yalm.transformer.layers.24.mlp.dense_ffn_hidden.weight": "pytorch_model-27-of-84.pt", "yalm.transformer.layers.24.mlp.dense_ffn_gate.weight": "pytorch_model-27-of-84.pt", "yalm.transformer.layers.24.mlp.dense_ffn_output.weight": "pytorch_model-27-of-84.pt", "yalm.transformer.layers.24.attention.query_key_value.bias": "pytorch_model-27-of-84.pt", "yalm.transformer.layers.24.attention.dense.bias": "pytorch_model-27-of-84.pt", "yalm.transformer.layers.24.post_attention_layernorm.weight": "pytorch_model-27-of-84.pt", "yalm.transformer.layers.24.post_attention_layernorm.bias": "pytorch_model-27-of-84.pt", "yalm.transformer.layers.24.mlp.dense_ffn_hidden.bias": "pytorch_model-27-of-84.pt", "yalm.transformer.layers.24.mlp.dense_ffn_gate.bias": "pytorch_model-27-of-84.pt", "yalm.transformer.layers.24.mlp.dense_ffn_output.bias": "pytorch_model-27-of-84.pt", "yalm.transformer.layers.24.input_layernorm.weight": "pytorch_model-27-of-84.pt", "yalm.transformer.layers.24.input_layernorm.bias": "pytorch_model-27-of-84.pt", "yalm.transformer.layers.25.attention.query_key_value.weight": "pytorch_model-28-of-84.pt", "yalm.transformer.layers.25.attention.dense.weight": "pytorch_model-28-of-84.pt", "yalm.transformer.layers.25.mlp.dense_ffn_hidden.weight": "pytorch_model-28-of-84.pt", "yalm.transformer.layers.25.mlp.dense_ffn_gate.weight": "pytorch_model-28-of-84.pt", "yalm.transformer.layers.25.mlp.dense_ffn_output.weight": "pytorch_model-28-of-84.pt", "yalm.transformer.layers.25.attention.query_key_value.bias": "pytorch_model-28-of-84.pt", "yalm.transformer.layers.25.attention.dense.bias": "pytorch_model-28-of-84.pt", "yalm.transformer.layers.25.post_attention_layernorm.weight": "pytorch_model-28-of-84.pt", "yalm.transformer.layers.25.post_attention_layernorm.bias": "pytorch_model-28-of-84.pt", "yalm.transformer.layers.25.mlp.dense_ffn_hidden.bias": "pytorch_model-28-of-84.pt", "yalm.transformer.layers.25.mlp.dense_ffn_gate.bias": "pytorch_model-28-of-84.pt", "yalm.transformer.layers.25.mlp.dense_ffn_output.bias": "pytorch_model-28-of-84.pt", "yalm.transformer.layers.25.input_layernorm.weight": "pytorch_model-28-of-84.pt", "yalm.transformer.layers.25.input_layernorm.bias": "pytorch_model-28-of-84.pt", "yalm.transformer.layers.26.attention.query_key_value.weight": "pytorch_model-29-of-84.pt", "yalm.transformer.layers.26.attention.dense.weight": "pytorch_model-29-of-84.pt", "yalm.transformer.layers.26.mlp.dense_ffn_hidden.weight": "pytorch_model-29-of-84.pt", "yalm.transformer.layers.26.mlp.dense_ffn_gate.weight": "pytorch_model-29-of-84.pt", "yalm.transformer.layers.26.mlp.dense_ffn_output.weight": "pytorch_model-29-of-84.pt", "yalm.transformer.layers.26.attention.query_key_value.bias": "pytorch_model-29-of-84.pt", "yalm.transformer.layers.26.attention.dense.bias": "pytorch_model-29-of-84.pt", "yalm.transformer.layers.26.post_attention_layernorm.weight": "pytorch_model-29-of-84.pt", 
"yalm.transformer.layers.26.post_attention_layernorm.bias": "pytorch_model-29-of-84.pt", "yalm.transformer.layers.26.mlp.dense_ffn_hidden.bias": "pytorch_model-29-of-84.pt", "yalm.transformer.layers.26.mlp.dense_ffn_gate.bias": "pytorch_model-29-of-84.pt", "yalm.transformer.layers.26.mlp.dense_ffn_output.bias": "pytorch_model-29-of-84.pt", "yalm.transformer.layers.26.input_layernorm.weight": "pytorch_model-29-of-84.pt", "yalm.transformer.layers.26.input_layernorm.bias": "pytorch_model-29-of-84.pt", "yalm.transformer.layers.27.attention.query_key_value.weight": "pytorch_model-30-of-84.pt", "yalm.transformer.layers.27.attention.dense.weight": "pytorch_model-30-of-84.pt", "yalm.transformer.layers.27.mlp.dense_ffn_hidden.weight": "pytorch_model-30-of-84.pt", "yalm.transformer.layers.27.mlp.dense_ffn_gate.weight": "pytorch_model-30-of-84.pt", "yalm.transformer.layers.27.mlp.dense_ffn_output.weight": "pytorch_model-30-of-84.pt", "yalm.transformer.layers.27.attention.query_key_value.bias": "pytorch_model-30-of-84.pt", "yalm.transformer.layers.27.attention.dense.bias": "pytorch_model-30-of-84.pt", "yalm.transformer.layers.27.post_attention_layernorm.weight": "pytorch_model-30-of-84.pt", "yalm.transformer.layers.27.post_attention_layernorm.bias": "pytorch_model-30-of-84.pt", "yalm.transformer.layers.27.mlp.dense_ffn_hidden.bias": "pytorch_model-30-of-84.pt", "yalm.transformer.layers.27.mlp.dense_ffn_gate.bias": "pytorch_model-30-of-84.pt", "yalm.transformer.layers.27.mlp.dense_ffn_output.bias": "pytorch_model-30-of-84.pt", "yalm.transformer.layers.27.input_layernorm.weight": "pytorch_model-30-of-84.pt", "yalm.transformer.layers.27.input_layernorm.bias": "pytorch_model-30-of-84.pt", "yalm.transformer.layers.28.attention.query_key_value.weight": "pytorch_model-31-of-84.pt", "yalm.transformer.layers.28.attention.dense.weight": "pytorch_model-31-of-84.pt", "yalm.transformer.layers.28.mlp.dense_ffn_hidden.weight": "pytorch_model-31-of-84.pt", "yalm.transformer.layers.28.mlp.dense_ffn_gate.weight": "pytorch_model-31-of-84.pt", "yalm.transformer.layers.28.mlp.dense_ffn_output.weight": "pytorch_model-31-of-84.pt", "yalm.transformer.layers.28.attention.query_key_value.bias": "pytorch_model-31-of-84.pt", "yalm.transformer.layers.28.attention.dense.bias": "pytorch_model-31-of-84.pt", "yalm.transformer.layers.28.post_attention_layernorm.weight": "pytorch_model-31-of-84.pt", "yalm.transformer.layers.28.post_attention_layernorm.bias": "pytorch_model-31-of-84.pt", "yalm.transformer.layers.28.mlp.dense_ffn_hidden.bias": "pytorch_model-31-of-84.pt", "yalm.transformer.layers.28.mlp.dense_ffn_gate.bias": "pytorch_model-31-of-84.pt", "yalm.transformer.layers.28.mlp.dense_ffn_output.bias": "pytorch_model-31-of-84.pt", "yalm.transformer.layers.28.input_layernorm.weight": "pytorch_model-31-of-84.pt", "yalm.transformer.layers.28.input_layernorm.bias": "pytorch_model-31-of-84.pt", "yalm.transformer.layers.29.attention.query_key_value.weight": "pytorch_model-32-of-84.pt", "yalm.transformer.layers.29.attention.dense.weight": "pytorch_model-32-of-84.pt", "yalm.transformer.layers.29.mlp.dense_ffn_hidden.weight": "pytorch_model-32-of-84.pt", "yalm.transformer.layers.29.mlp.dense_ffn_gate.weight": "pytorch_model-32-of-84.pt", "yalm.transformer.layers.29.mlp.dense_ffn_output.weight": "pytorch_model-32-of-84.pt", "yalm.transformer.layers.29.attention.query_key_value.bias": "pytorch_model-32-of-84.pt", "yalm.transformer.layers.29.attention.dense.bias": "pytorch_model-32-of-84.pt", 
"yalm.transformer.layers.29.post_attention_layernorm.weight": "pytorch_model-32-of-84.pt", "yalm.transformer.layers.29.post_attention_layernorm.bias": "pytorch_model-32-of-84.pt", "yalm.transformer.layers.29.mlp.dense_ffn_hidden.bias": "pytorch_model-32-of-84.pt", "yalm.transformer.layers.29.mlp.dense_ffn_gate.bias": "pytorch_model-32-of-84.pt", "yalm.transformer.layers.29.mlp.dense_ffn_output.bias": "pytorch_model-32-of-84.pt", "yalm.transformer.layers.29.input_layernorm.weight": "pytorch_model-32-of-84.pt", "yalm.transformer.layers.29.input_layernorm.bias": "pytorch_model-32-of-84.pt", "yalm.transformer.layers.30.attention.query_key_value.weight": "pytorch_model-33-of-84.pt", "yalm.transformer.layers.30.attention.dense.weight": "pytorch_model-33-of-84.pt", "yalm.transformer.layers.30.mlp.dense_ffn_hidden.weight": "pytorch_model-33-of-84.pt", "yalm.transformer.layers.30.mlp.dense_ffn_gate.weight": "pytorch_model-33-of-84.pt", "yalm.transformer.layers.30.mlp.dense_ffn_output.weight": "pytorch_model-33-of-84.pt", "yalm.transformer.layers.30.attention.query_key_value.bias": "pytorch_model-33-of-84.pt", "yalm.transformer.layers.30.attention.dense.bias": "pytorch_model-33-of-84.pt", "yalm.transformer.layers.30.post_attention_layernorm.weight": "pytorch_model-33-of-84.pt", "yalm.transformer.layers.30.post_attention_layernorm.bias": "pytorch_model-33-of-84.pt", "yalm.transformer.layers.30.mlp.dense_ffn_hidden.bias": "pytorch_model-33-of-84.pt", "yalm.transformer.layers.30.mlp.dense_ffn_gate.bias": "pytorch_model-33-of-84.pt", "yalm.transformer.layers.30.mlp.dense_ffn_output.bias": "pytorch_model-33-of-84.pt", "yalm.transformer.layers.30.input_layernorm.weight": "pytorch_model-33-of-84.pt", "yalm.transformer.layers.30.input_layernorm.bias": "pytorch_model-33-of-84.pt", "yalm.transformer.layers.31.attention.query_key_value.weight": "pytorch_model-34-of-84.pt", "yalm.transformer.layers.31.attention.dense.weight": "pytorch_model-34-of-84.pt", "yalm.transformer.layers.31.mlp.dense_ffn_hidden.weight": "pytorch_model-34-of-84.pt", "yalm.transformer.layers.31.mlp.dense_ffn_gate.weight": "pytorch_model-34-of-84.pt", "yalm.transformer.layers.31.mlp.dense_ffn_output.weight": "pytorch_model-34-of-84.pt", "yalm.transformer.layers.31.attention.query_key_value.bias": "pytorch_model-34-of-84.pt", "yalm.transformer.layers.31.attention.dense.bias": "pytorch_model-34-of-84.pt", "yalm.transformer.layers.31.post_attention_layernorm.weight": "pytorch_model-34-of-84.pt", "yalm.transformer.layers.31.post_attention_layernorm.bias": "pytorch_model-34-of-84.pt", "yalm.transformer.layers.31.mlp.dense_ffn_hidden.bias": "pytorch_model-34-of-84.pt", "yalm.transformer.layers.31.mlp.dense_ffn_gate.bias": "pytorch_model-34-of-84.pt", "yalm.transformer.layers.31.mlp.dense_ffn_output.bias": "pytorch_model-34-of-84.pt", "yalm.transformer.layers.31.input_layernorm.weight": "pytorch_model-34-of-84.pt", "yalm.transformer.layers.31.input_layernorm.bias": "pytorch_model-34-of-84.pt", "yalm.transformer.layers.32.attention.query_key_value.weight": "pytorch_model-35-of-84.pt", "yalm.transformer.layers.32.attention.dense.weight": "pytorch_model-35-of-84.pt", "yalm.transformer.layers.32.mlp.dense_ffn_hidden.weight": "pytorch_model-35-of-84.pt", "yalm.transformer.layers.32.mlp.dense_ffn_gate.weight": "pytorch_model-35-of-84.pt", "yalm.transformer.layers.32.mlp.dense_ffn_output.weight": "pytorch_model-35-of-84.pt", "yalm.transformer.layers.32.attention.query_key_value.bias": "pytorch_model-35-of-84.pt", 
"yalm.transformer.layers.32.attention.dense.bias": "pytorch_model-35-of-84.pt", "yalm.transformer.layers.32.post_attention_layernorm.weight": "pytorch_model-35-of-84.pt", "yalm.transformer.layers.32.post_attention_layernorm.bias": "pytorch_model-35-of-84.pt", "yalm.transformer.layers.32.mlp.dense_ffn_hidden.bias": "pytorch_model-35-of-84.pt", "yalm.transformer.layers.32.mlp.dense_ffn_gate.bias": "pytorch_model-35-of-84.pt", "yalm.transformer.layers.32.mlp.dense_ffn_output.bias": "pytorch_model-35-of-84.pt", "yalm.transformer.layers.32.input_layernorm.weight": "pytorch_model-35-of-84.pt", "yalm.transformer.layers.32.input_layernorm.bias": "pytorch_model-35-of-84.pt", "yalm.transformer.layers.33.attention.query_key_value.weight": "pytorch_model-36-of-84.pt", "yalm.transformer.layers.33.attention.dense.weight": "pytorch_model-36-of-84.pt", "yalm.transformer.layers.33.mlp.dense_ffn_hidden.weight": "pytorch_model-36-of-84.pt", "yalm.transformer.layers.33.mlp.dense_ffn_gate.weight": "pytorch_model-36-of-84.pt", "yalm.transformer.layers.33.mlp.dense_ffn_output.weight": "pytorch_model-36-of-84.pt", "yalm.transformer.layers.33.attention.query_key_value.bias": "pytorch_model-36-of-84.pt", "yalm.transformer.layers.33.attention.dense.bias": "pytorch_model-36-of-84.pt", "yalm.transformer.layers.33.post_attention_layernorm.weight": "pytorch_model-36-of-84.pt", "yalm.transformer.layers.33.post_attention_layernorm.bias": "pytorch_model-36-of-84.pt", "yalm.transformer.layers.33.mlp.dense_ffn_hidden.bias": "pytorch_model-36-of-84.pt", "yalm.transformer.layers.33.mlp.dense_ffn_gate.bias": "pytorch_model-36-of-84.pt", "yalm.transformer.layers.33.mlp.dense_ffn_output.bias": "pytorch_model-36-of-84.pt", "yalm.transformer.layers.33.input_layernorm.weight": "pytorch_model-36-of-84.pt", "yalm.transformer.layers.33.input_layernorm.bias": "pytorch_model-36-of-84.pt", "yalm.transformer.layers.34.attention.query_key_value.weight": "pytorch_model-37-of-84.pt", "yalm.transformer.layers.34.attention.dense.weight": "pytorch_model-37-of-84.pt", "yalm.transformer.layers.34.mlp.dense_ffn_hidden.weight": "pytorch_model-37-of-84.pt", "yalm.transformer.layers.34.mlp.dense_ffn_gate.weight": "pytorch_model-37-of-84.pt", "yalm.transformer.layers.34.mlp.dense_ffn_output.weight": "pytorch_model-37-of-84.pt", "yalm.transformer.layers.34.attention.query_key_value.bias": "pytorch_model-37-of-84.pt", "yalm.transformer.layers.34.attention.dense.bias": "pytorch_model-37-of-84.pt", "yalm.transformer.layers.34.post_attention_layernorm.weight": "pytorch_model-37-of-84.pt", "yalm.transformer.layers.34.post_attention_layernorm.bias": "pytorch_model-37-of-84.pt", "yalm.transformer.layers.34.mlp.dense_ffn_hidden.bias": "pytorch_model-37-of-84.pt", "yalm.transformer.layers.34.mlp.dense_ffn_gate.bias": "pytorch_model-37-of-84.pt", "yalm.transformer.layers.34.mlp.dense_ffn_output.bias": "pytorch_model-37-of-84.pt", "yalm.transformer.layers.34.input_layernorm.weight": "pytorch_model-37-of-84.pt", "yalm.transformer.layers.34.input_layernorm.bias": "pytorch_model-37-of-84.pt", "yalm.transformer.layers.35.attention.query_key_value.weight": "pytorch_model-38-of-84.pt", "yalm.transformer.layers.35.attention.dense.weight": "pytorch_model-38-of-84.pt", "yalm.transformer.layers.35.mlp.dense_ffn_hidden.weight": "pytorch_model-38-of-84.pt", "yalm.transformer.layers.35.mlp.dense_ffn_gate.weight": "pytorch_model-38-of-84.pt", "yalm.transformer.layers.35.mlp.dense_ffn_output.weight": "pytorch_model-38-of-84.pt", 
"yalm.transformer.layers.35.attention.query_key_value.bias": "pytorch_model-38-of-84.pt", "yalm.transformer.layers.35.attention.dense.bias": "pytorch_model-38-of-84.pt", "yalm.transformer.layers.35.post_attention_layernorm.weight": "pytorch_model-38-of-84.pt", "yalm.transformer.layers.35.post_attention_layernorm.bias": "pytorch_model-38-of-84.pt", "yalm.transformer.layers.35.mlp.dense_ffn_hidden.bias": "pytorch_model-38-of-84.pt", "yalm.transformer.layers.35.mlp.dense_ffn_gate.bias": "pytorch_model-38-of-84.pt", "yalm.transformer.layers.35.mlp.dense_ffn_output.bias": "pytorch_model-38-of-84.pt", "yalm.transformer.layers.35.input_layernorm.weight": "pytorch_model-38-of-84.pt", "yalm.transformer.layers.35.input_layernorm.bias": "pytorch_model-38-of-84.pt", "yalm.transformer.layers.36.attention.query_key_value.weight": "pytorch_model-39-of-84.pt", "yalm.transformer.layers.36.attention.dense.weight": "pytorch_model-39-of-84.pt", "yalm.transformer.layers.36.mlp.dense_ffn_hidden.weight": "pytorch_model-39-of-84.pt", "yalm.transformer.layers.36.mlp.dense_ffn_gate.weight": "pytorch_model-39-of-84.pt", "yalm.transformer.layers.36.mlp.dense_ffn_output.weight": "pytorch_model-39-of-84.pt", "yalm.transformer.layers.36.attention.query_key_value.bias": "pytorch_model-39-of-84.pt", "yalm.transformer.layers.36.attention.dense.bias": "pytorch_model-39-of-84.pt", "yalm.transformer.layers.36.post_attention_layernorm.weight": "pytorch_model-39-of-84.pt", "yalm.transformer.layers.36.post_attention_layernorm.bias": "pytorch_model-39-of-84.pt", "yalm.transformer.layers.36.mlp.dense_ffn_hidden.bias": "pytorch_model-39-of-84.pt", "yalm.transformer.layers.36.mlp.dense_ffn_gate.bias": "pytorch_model-39-of-84.pt", "yalm.transformer.layers.36.mlp.dense_ffn_output.bias": "pytorch_model-39-of-84.pt", "yalm.transformer.layers.36.input_layernorm.weight": "pytorch_model-39-of-84.pt", "yalm.transformer.layers.36.input_layernorm.bias": "pytorch_model-39-of-84.pt", "yalm.transformer.layers.37.attention.query_key_value.weight": "pytorch_model-40-of-84.pt", "yalm.transformer.layers.37.attention.dense.weight": "pytorch_model-40-of-84.pt", "yalm.transformer.layers.37.mlp.dense_ffn_hidden.weight": "pytorch_model-40-of-84.pt", "yalm.transformer.layers.37.mlp.dense_ffn_gate.weight": "pytorch_model-40-of-84.pt", "yalm.transformer.layers.37.mlp.dense_ffn_output.weight": "pytorch_model-40-of-84.pt", "yalm.transformer.layers.37.attention.query_key_value.bias": "pytorch_model-40-of-84.pt", "yalm.transformer.layers.37.attention.dense.bias": "pytorch_model-40-of-84.pt", "yalm.transformer.layers.37.post_attention_layernorm.weight": "pytorch_model-40-of-84.pt", "yalm.transformer.layers.37.post_attention_layernorm.bias": "pytorch_model-40-of-84.pt", "yalm.transformer.layers.37.mlp.dense_ffn_hidden.bias": "pytorch_model-40-of-84.pt", "yalm.transformer.layers.37.mlp.dense_ffn_gate.bias": "pytorch_model-40-of-84.pt", "yalm.transformer.layers.37.mlp.dense_ffn_output.bias": "pytorch_model-40-of-84.pt", "yalm.transformer.layers.37.input_layernorm.weight": "pytorch_model-40-of-84.pt", "yalm.transformer.layers.37.input_layernorm.bias": "pytorch_model-40-of-84.pt", "yalm.transformer.layers.38.attention.query_key_value.weight": "pytorch_model-41-of-84.pt", "yalm.transformer.layers.38.attention.dense.weight": "pytorch_model-41-of-84.pt", "yalm.transformer.layers.38.mlp.dense_ffn_hidden.weight": "pytorch_model-41-of-84.pt", "yalm.transformer.layers.38.mlp.dense_ffn_gate.weight": "pytorch_model-41-of-84.pt", 
"yalm.transformer.layers.38.mlp.dense_ffn_output.weight": "pytorch_model-41-of-84.pt", "yalm.transformer.layers.38.attention.query_key_value.bias": "pytorch_model-41-of-84.pt", "yalm.transformer.layers.38.attention.dense.bias": "pytorch_model-41-of-84.pt", "yalm.transformer.layers.38.post_attention_layernorm.weight": "pytorch_model-41-of-84.pt", "yalm.transformer.layers.38.post_attention_layernorm.bias": "pytorch_model-41-of-84.pt", "yalm.transformer.layers.38.mlp.dense_ffn_hidden.bias": "pytorch_model-41-of-84.pt", "yalm.transformer.layers.38.mlp.dense_ffn_gate.bias": "pytorch_model-41-of-84.pt", "yalm.transformer.layers.38.mlp.dense_ffn_output.bias": "pytorch_model-41-of-84.pt", "yalm.transformer.layers.38.input_layernorm.weight": "pytorch_model-41-of-84.pt", "yalm.transformer.layers.38.input_layernorm.bias": "pytorch_model-41-of-84.pt", "yalm.transformer.layers.39.attention.query_key_value.weight": "pytorch_model-42-of-84.pt", "yalm.transformer.layers.39.attention.dense.weight": "pytorch_model-42-of-84.pt", "yalm.transformer.layers.39.mlp.dense_ffn_hidden.weight": "pytorch_model-42-of-84.pt", "yalm.transformer.layers.39.mlp.dense_ffn_gate.weight": "pytorch_model-42-of-84.pt", "yalm.transformer.layers.39.mlp.dense_ffn_output.weight": "pytorch_model-42-of-84.pt", "yalm.transformer.layers.39.attention.query_key_value.bias": "pytorch_model-42-of-84.pt", "yalm.transformer.layers.39.attention.dense.bias": "pytorch_model-42-of-84.pt", "yalm.transformer.layers.39.post_attention_layernorm.weight": "pytorch_model-42-of-84.pt", "yalm.transformer.layers.39.post_attention_layernorm.bias": "pytorch_model-42-of-84.pt", "yalm.transformer.layers.39.mlp.dense_ffn_hidden.bias": "pytorch_model-42-of-84.pt", "yalm.transformer.layers.39.mlp.dense_ffn_gate.bias": "pytorch_model-42-of-84.pt", "yalm.transformer.layers.39.mlp.dense_ffn_output.bias": "pytorch_model-42-of-84.pt", "yalm.transformer.layers.39.input_layernorm.weight": "pytorch_model-42-of-84.pt", "yalm.transformer.layers.39.input_layernorm.bias": "pytorch_model-42-of-84.pt", "yalm.transformer.layers.40.attention.query_key_value.weight": "pytorch_model-43-of-84.pt", "yalm.transformer.layers.40.attention.dense.weight": "pytorch_model-43-of-84.pt", "yalm.transformer.layers.40.mlp.dense_ffn_hidden.weight": "pytorch_model-43-of-84.pt", "yalm.transformer.layers.40.mlp.dense_ffn_gate.weight": "pytorch_model-43-of-84.pt", "yalm.transformer.layers.40.mlp.dense_ffn_output.weight": "pytorch_model-43-of-84.pt", "yalm.transformer.layers.40.attention.query_key_value.bias": "pytorch_model-43-of-84.pt", "yalm.transformer.layers.40.attention.dense.bias": "pytorch_model-43-of-84.pt", "yalm.transformer.layers.40.post_attention_layernorm.weight": "pytorch_model-43-of-84.pt", "yalm.transformer.layers.40.post_attention_layernorm.bias": "pytorch_model-43-of-84.pt", "yalm.transformer.layers.40.mlp.dense_ffn_hidden.bias": "pytorch_model-43-of-84.pt", "yalm.transformer.layers.40.mlp.dense_ffn_gate.bias": "pytorch_model-43-of-84.pt", "yalm.transformer.layers.40.mlp.dense_ffn_output.bias": "pytorch_model-43-of-84.pt", "yalm.transformer.layers.40.input_layernorm.weight": "pytorch_model-43-of-84.pt", "yalm.transformer.layers.40.input_layernorm.bias": "pytorch_model-43-of-84.pt", "yalm.transformer.layers.41.attention.query_key_value.weight": "pytorch_model-44-of-84.pt", "yalm.transformer.layers.41.attention.dense.weight": "pytorch_model-44-of-84.pt", "yalm.transformer.layers.41.mlp.dense_ffn_hidden.weight": "pytorch_model-44-of-84.pt", 
"yalm.transformer.layers.41.mlp.dense_ffn_gate.weight": "pytorch_model-44-of-84.pt", "yalm.transformer.layers.41.mlp.dense_ffn_output.weight": "pytorch_model-44-of-84.pt", "yalm.transformer.layers.41.attention.query_key_value.bias": "pytorch_model-44-of-84.pt", "yalm.transformer.layers.41.attention.dense.bias": "pytorch_model-44-of-84.pt", "yalm.transformer.layers.41.post_attention_layernorm.weight": "pytorch_model-44-of-84.pt", "yalm.transformer.layers.41.post_attention_layernorm.bias": "pytorch_model-44-of-84.pt", "yalm.transformer.layers.41.mlp.dense_ffn_hidden.bias": "pytorch_model-44-of-84.pt", "yalm.transformer.layers.41.mlp.dense_ffn_gate.bias": "pytorch_model-44-of-84.pt", "yalm.transformer.layers.41.mlp.dense_ffn_output.bias": "pytorch_model-44-of-84.pt", "yalm.transformer.layers.41.input_layernorm.weight": "pytorch_model-44-of-84.pt", "yalm.transformer.layers.41.input_layernorm.bias": "pytorch_model-44-of-84.pt", "yalm.transformer.layers.42.attention.query_key_value.weight": "pytorch_model-45-of-84.pt", "yalm.transformer.layers.42.attention.dense.weight": "pytorch_model-45-of-84.pt", "yalm.transformer.layers.42.mlp.dense_ffn_hidden.weight": "pytorch_model-45-of-84.pt", "yalm.transformer.layers.42.mlp.dense_ffn_gate.weight": "pytorch_model-45-of-84.pt", "yalm.transformer.layers.42.mlp.dense_ffn_output.weight": "pytorch_model-45-of-84.pt", "yalm.transformer.layers.42.attention.query_key_value.bias": "pytorch_model-45-of-84.pt", "yalm.transformer.layers.42.attention.dense.bias": "pytorch_model-45-of-84.pt", "yalm.transformer.layers.42.post_attention_layernorm.weight": "pytorch_model-45-of-84.pt", "yalm.transformer.layers.42.post_attention_layernorm.bias": "pytorch_model-45-of-84.pt", "yalm.transformer.layers.42.mlp.dense_ffn_hidden.bias": "pytorch_model-45-of-84.pt", "yalm.transformer.layers.42.mlp.dense_ffn_gate.bias": "pytorch_model-45-of-84.pt", "yalm.transformer.layers.42.mlp.dense_ffn_output.bias": "pytorch_model-45-of-84.pt", "yalm.transformer.layers.42.input_layernorm.weight": "pytorch_model-45-of-84.pt", "yalm.transformer.layers.42.input_layernorm.bias": "pytorch_model-45-of-84.pt", "yalm.transformer.layers.43.attention.query_key_value.weight": "pytorch_model-46-of-84.pt", "yalm.transformer.layers.43.attention.dense.weight": "pytorch_model-46-of-84.pt", "yalm.transformer.layers.43.mlp.dense_ffn_hidden.weight": "pytorch_model-46-of-84.pt", "yalm.transformer.layers.43.mlp.dense_ffn_gate.weight": "pytorch_model-46-of-84.pt", "yalm.transformer.layers.43.mlp.dense_ffn_output.weight": "pytorch_model-46-of-84.pt", "yalm.transformer.layers.43.attention.query_key_value.bias": "pytorch_model-46-of-84.pt", "yalm.transformer.layers.43.attention.dense.bias": "pytorch_model-46-of-84.pt", "yalm.transformer.layers.43.post_attention_layernorm.weight": "pytorch_model-46-of-84.pt", "yalm.transformer.layers.43.post_attention_layernorm.bias": "pytorch_model-46-of-84.pt", "yalm.transformer.layers.43.mlp.dense_ffn_hidden.bias": "pytorch_model-46-of-84.pt", "yalm.transformer.layers.43.mlp.dense_ffn_gate.bias": "pytorch_model-46-of-84.pt", "yalm.transformer.layers.43.mlp.dense_ffn_output.bias": "pytorch_model-46-of-84.pt", "yalm.transformer.layers.43.input_layernorm.weight": "pytorch_model-46-of-84.pt", "yalm.transformer.layers.43.input_layernorm.bias": "pytorch_model-46-of-84.pt", "yalm.transformer.layers.44.attention.query_key_value.weight": "pytorch_model-47-of-84.pt", "yalm.transformer.layers.44.attention.dense.weight": "pytorch_model-47-of-84.pt", 
"yalm.transformer.layers.44.mlp.dense_ffn_hidden.weight": "pytorch_model-47-of-84.pt", "yalm.transformer.layers.44.mlp.dense_ffn_gate.weight": "pytorch_model-47-of-84.pt", "yalm.transformer.layers.44.mlp.dense_ffn_output.weight": "pytorch_model-47-of-84.pt", "yalm.transformer.layers.44.attention.query_key_value.bias": "pytorch_model-47-of-84.pt", "yalm.transformer.layers.44.attention.dense.bias": "pytorch_model-47-of-84.pt", "yalm.transformer.layers.44.post_attention_layernorm.weight": "pytorch_model-47-of-84.pt", "yalm.transformer.layers.44.post_attention_layernorm.bias": "pytorch_model-47-of-84.pt", "yalm.transformer.layers.44.mlp.dense_ffn_hidden.bias": "pytorch_model-47-of-84.pt", "yalm.transformer.layers.44.mlp.dense_ffn_gate.bias": "pytorch_model-47-of-84.pt", "yalm.transformer.layers.44.mlp.dense_ffn_output.bias": "pytorch_model-47-of-84.pt", "yalm.transformer.layers.44.input_layernorm.weight": "pytorch_model-47-of-84.pt", "yalm.transformer.layers.44.input_layernorm.bias": "pytorch_model-47-of-84.pt", "yalm.transformer.layers.45.attention.query_key_value.weight": "pytorch_model-48-of-84.pt", "yalm.transformer.layers.45.attention.dense.weight": "pytorch_model-48-of-84.pt", "yalm.transformer.layers.45.mlp.dense_ffn_hidden.weight": "pytorch_model-48-of-84.pt", "yalm.transformer.layers.45.mlp.dense_ffn_gate.weight": "pytorch_model-48-of-84.pt", "yalm.transformer.layers.45.mlp.dense_ffn_output.weight": "pytorch_model-48-of-84.pt", "yalm.transformer.layers.45.attention.query_key_value.bias": "pytorch_model-48-of-84.pt", "yalm.transformer.layers.45.attention.dense.bias": "pytorch_model-48-of-84.pt", "yalm.transformer.layers.45.post_attention_layernorm.weight": "pytorch_model-48-of-84.pt", "yalm.transformer.layers.45.post_attention_layernorm.bias": "pytorch_model-48-of-84.pt", "yalm.transformer.layers.45.mlp.dense_ffn_hidden.bias": "pytorch_model-48-of-84.pt", "yalm.transformer.layers.45.mlp.dense_ffn_gate.bias": "pytorch_model-48-of-84.pt", "yalm.transformer.layers.45.mlp.dense_ffn_output.bias": "pytorch_model-48-of-84.pt", "yalm.transformer.layers.45.input_layernorm.weight": "pytorch_model-48-of-84.pt", "yalm.transformer.layers.45.input_layernorm.bias": "pytorch_model-48-of-84.pt", "yalm.transformer.layers.46.attention.query_key_value.weight": "pytorch_model-49-of-84.pt", "yalm.transformer.layers.46.attention.dense.weight": "pytorch_model-49-of-84.pt", "yalm.transformer.layers.46.mlp.dense_ffn_hidden.weight": "pytorch_model-49-of-84.pt", "yalm.transformer.layers.46.mlp.dense_ffn_gate.weight": "pytorch_model-49-of-84.pt", "yalm.transformer.layers.46.mlp.dense_ffn_output.weight": "pytorch_model-49-of-84.pt", "yalm.transformer.layers.46.attention.query_key_value.bias": "pytorch_model-49-of-84.pt", "yalm.transformer.layers.46.attention.dense.bias": "pytorch_model-49-of-84.pt", "yalm.transformer.layers.46.post_attention_layernorm.weight": "pytorch_model-49-of-84.pt", "yalm.transformer.layers.46.post_attention_layernorm.bias": "pytorch_model-49-of-84.pt", "yalm.transformer.layers.46.mlp.dense_ffn_hidden.bias": "pytorch_model-49-of-84.pt", "yalm.transformer.layers.46.mlp.dense_ffn_gate.bias": "pytorch_model-49-of-84.pt", "yalm.transformer.layers.46.mlp.dense_ffn_output.bias": "pytorch_model-49-of-84.pt", "yalm.transformer.layers.46.input_layernorm.weight": "pytorch_model-49-of-84.pt", "yalm.transformer.layers.46.input_layernorm.bias": "pytorch_model-49-of-84.pt", "yalm.transformer.layers.47.attention.query_key_value.weight": "pytorch_model-50-of-84.pt", 
"yalm.transformer.layers.47.attention.dense.weight": "pytorch_model-50-of-84.pt", "yalm.transformer.layers.47.mlp.dense_ffn_hidden.weight": "pytorch_model-50-of-84.pt", "yalm.transformer.layers.47.mlp.dense_ffn_gate.weight": "pytorch_model-50-of-84.pt", "yalm.transformer.layers.47.mlp.dense_ffn_output.weight": "pytorch_model-50-of-84.pt", "yalm.transformer.layers.47.attention.query_key_value.bias": "pytorch_model-50-of-84.pt", "yalm.transformer.layers.47.attention.dense.bias": "pytorch_model-50-of-84.pt", "yalm.transformer.layers.47.post_attention_layernorm.weight": "pytorch_model-50-of-84.pt", "yalm.transformer.layers.47.post_attention_layernorm.bias": "pytorch_model-50-of-84.pt", "yalm.transformer.layers.47.mlp.dense_ffn_hidden.bias": "pytorch_model-50-of-84.pt", "yalm.transformer.layers.47.mlp.dense_ffn_gate.bias": "pytorch_model-50-of-84.pt", "yalm.transformer.layers.47.mlp.dense_ffn_output.bias": "pytorch_model-50-of-84.pt", "yalm.transformer.layers.47.input_layernorm.weight": "pytorch_model-50-of-84.pt", "yalm.transformer.layers.47.input_layernorm.bias": "pytorch_model-50-of-84.pt", "yalm.transformer.layers.48.attention.query_key_value.weight": "pytorch_model-51-of-84.pt", "yalm.transformer.layers.48.attention.dense.weight": "pytorch_model-51-of-84.pt", "yalm.transformer.layers.48.mlp.dense_ffn_hidden.weight": "pytorch_model-51-of-84.pt", "yalm.transformer.layers.48.mlp.dense_ffn_gate.weight": "pytorch_model-51-of-84.pt", "yalm.transformer.layers.48.mlp.dense_ffn_output.weight": "pytorch_model-51-of-84.pt", "yalm.transformer.layers.48.attention.query_key_value.bias": "pytorch_model-51-of-84.pt", "yalm.transformer.layers.48.attention.dense.bias": "pytorch_model-51-of-84.pt", "yalm.transformer.layers.48.post_attention_layernorm.weight": "pytorch_model-51-of-84.pt", "yalm.transformer.layers.48.post_attention_layernorm.bias": "pytorch_model-51-of-84.pt", "yalm.transformer.layers.48.mlp.dense_ffn_hidden.bias": "pytorch_model-51-of-84.pt", "yalm.transformer.layers.48.mlp.dense_ffn_gate.bias": "pytorch_model-51-of-84.pt", "yalm.transformer.layers.48.mlp.dense_ffn_output.bias": "pytorch_model-51-of-84.pt", "yalm.transformer.layers.48.input_layernorm.weight": "pytorch_model-51-of-84.pt", "yalm.transformer.layers.48.input_layernorm.bias": "pytorch_model-51-of-84.pt", "yalm.transformer.layers.49.attention.query_key_value.weight": "pytorch_model-52-of-84.pt", "yalm.transformer.layers.49.attention.dense.weight": "pytorch_model-52-of-84.pt", "yalm.transformer.layers.49.mlp.dense_ffn_hidden.weight": "pytorch_model-52-of-84.pt", "yalm.transformer.layers.49.mlp.dense_ffn_gate.weight": "pytorch_model-52-of-84.pt", "yalm.transformer.layers.49.mlp.dense_ffn_output.weight": "pytorch_model-52-of-84.pt", "yalm.transformer.layers.49.attention.query_key_value.bias": "pytorch_model-52-of-84.pt", "yalm.transformer.layers.49.attention.dense.bias": "pytorch_model-52-of-84.pt", "yalm.transformer.layers.49.post_attention_layernorm.weight": "pytorch_model-52-of-84.pt", "yalm.transformer.layers.49.post_attention_layernorm.bias": "pytorch_model-52-of-84.pt", "yalm.transformer.layers.49.mlp.dense_ffn_hidden.bias": "pytorch_model-52-of-84.pt", "yalm.transformer.layers.49.mlp.dense_ffn_gate.bias": "pytorch_model-52-of-84.pt", "yalm.transformer.layers.49.mlp.dense_ffn_output.bias": "pytorch_model-52-of-84.pt", "yalm.transformer.layers.49.input_layernorm.weight": "pytorch_model-52-of-84.pt", "yalm.transformer.layers.49.input_layernorm.bias": "pytorch_model-52-of-84.pt", 
"yalm.transformer.layers.50.attention.query_key_value.weight": "pytorch_model-53-of-84.pt", "yalm.transformer.layers.50.attention.dense.weight": "pytorch_model-53-of-84.pt", "yalm.transformer.layers.50.mlp.dense_ffn_hidden.weight": "pytorch_model-53-of-84.pt", "yalm.transformer.layers.50.mlp.dense_ffn_gate.weight": "pytorch_model-53-of-84.pt", "yalm.transformer.layers.50.mlp.dense_ffn_output.weight": "pytorch_model-53-of-84.pt", "yalm.transformer.layers.50.attention.query_key_value.bias": "pytorch_model-53-of-84.pt", "yalm.transformer.layers.50.attention.dense.bias": "pytorch_model-53-of-84.pt", "yalm.transformer.layers.50.post_attention_layernorm.weight": "pytorch_model-53-of-84.pt", "yalm.transformer.layers.50.post_attention_layernorm.bias": "pytorch_model-53-of-84.pt", "yalm.transformer.layers.50.mlp.dense_ffn_hidden.bias": "pytorch_model-53-of-84.pt", "yalm.transformer.layers.50.mlp.dense_ffn_gate.bias": "pytorch_model-53-of-84.pt", "yalm.transformer.layers.50.mlp.dense_ffn_output.bias": "pytorch_model-53-of-84.pt", "yalm.transformer.layers.50.input_layernorm.weight": "pytorch_model-53-of-84.pt", "yalm.transformer.layers.50.input_layernorm.bias": "pytorch_model-53-of-84.pt", "yalm.transformer.layers.51.attention.query_key_value.weight": "pytorch_model-54-of-84.pt", "yalm.transformer.layers.51.attention.dense.weight": "pytorch_model-54-of-84.pt", "yalm.transformer.layers.51.mlp.dense_ffn_hidden.weight": "pytorch_model-54-of-84.pt", "yalm.transformer.layers.51.mlp.dense_ffn_gate.weight": "pytorch_model-54-of-84.pt", "yalm.transformer.layers.51.mlp.dense_ffn_output.weight": "pytorch_model-54-of-84.pt", "yalm.transformer.layers.51.attention.query_key_value.bias": "pytorch_model-54-of-84.pt", "yalm.transformer.layers.51.attention.dense.bias": "pytorch_model-54-of-84.pt", "yalm.transformer.layers.51.post_attention_layernorm.weight": "pytorch_model-54-of-84.pt", "yalm.transformer.layers.51.post_attention_layernorm.bias": "pytorch_model-54-of-84.pt", "yalm.transformer.layers.51.mlp.dense_ffn_hidden.bias": "pytorch_model-54-of-84.pt", "yalm.transformer.layers.51.mlp.dense_ffn_gate.bias": "pytorch_model-54-of-84.pt", "yalm.transformer.layers.51.mlp.dense_ffn_output.bias": "pytorch_model-54-of-84.pt", "yalm.transformer.layers.51.input_layernorm.weight": "pytorch_model-54-of-84.pt", "yalm.transformer.layers.51.input_layernorm.bias": "pytorch_model-54-of-84.pt", "yalm.transformer.layers.52.attention.query_key_value.weight": "pytorch_model-55-of-84.pt", "yalm.transformer.layers.52.attention.dense.weight": "pytorch_model-55-of-84.pt", "yalm.transformer.layers.52.mlp.dense_ffn_hidden.weight": "pytorch_model-55-of-84.pt", "yalm.transformer.layers.52.mlp.dense_ffn_gate.weight": "pytorch_model-55-of-84.pt", "yalm.transformer.layers.52.mlp.dense_ffn_output.weight": "pytorch_model-55-of-84.pt", "yalm.transformer.layers.52.attention.query_key_value.bias": "pytorch_model-55-of-84.pt", "yalm.transformer.layers.52.attention.dense.bias": "pytorch_model-55-of-84.pt", "yalm.transformer.layers.52.post_attention_layernorm.weight": "pytorch_model-55-of-84.pt", "yalm.transformer.layers.52.post_attention_layernorm.bias": "pytorch_model-55-of-84.pt", "yalm.transformer.layers.52.mlp.dense_ffn_hidden.bias": "pytorch_model-55-of-84.pt", "yalm.transformer.layers.52.mlp.dense_ffn_gate.bias": "pytorch_model-55-of-84.pt", "yalm.transformer.layers.52.mlp.dense_ffn_output.bias": "pytorch_model-55-of-84.pt", "yalm.transformer.layers.52.input_layernorm.weight": "pytorch_model-55-of-84.pt", 
"yalm.transformer.layers.52.input_layernorm.bias": "pytorch_model-55-of-84.pt", "yalm.transformer.layers.53.attention.query_key_value.weight": "pytorch_model-56-of-84.pt", "yalm.transformer.layers.53.attention.dense.weight": "pytorch_model-56-of-84.pt", "yalm.transformer.layers.53.mlp.dense_ffn_hidden.weight": "pytorch_model-56-of-84.pt", "yalm.transformer.layers.53.mlp.dense_ffn_gate.weight": "pytorch_model-56-of-84.pt", "yalm.transformer.layers.53.mlp.dense_ffn_output.weight": "pytorch_model-56-of-84.pt", "yalm.transformer.layers.53.attention.query_key_value.bias": "pytorch_model-56-of-84.pt", "yalm.transformer.layers.53.attention.dense.bias": "pytorch_model-56-of-84.pt", "yalm.transformer.layers.53.post_attention_layernorm.weight": "pytorch_model-56-of-84.pt", "yalm.transformer.layers.53.post_attention_layernorm.bias": "pytorch_model-56-of-84.pt", "yalm.transformer.layers.53.mlp.dense_ffn_hidden.bias": "pytorch_model-56-of-84.pt", "yalm.transformer.layers.53.mlp.dense_ffn_gate.bias": "pytorch_model-56-of-84.pt", "yalm.transformer.layers.53.mlp.dense_ffn_output.bias": "pytorch_model-56-of-84.pt", "yalm.transformer.layers.53.input_layernorm.weight": "pytorch_model-56-of-84.pt", "yalm.transformer.layers.53.input_layernorm.bias": "pytorch_model-56-of-84.pt", "yalm.transformer.layers.54.attention.query_key_value.weight": "pytorch_model-57-of-84.pt", "yalm.transformer.layers.54.attention.dense.weight": "pytorch_model-57-of-84.pt", "yalm.transformer.layers.54.mlp.dense_ffn_hidden.weight": "pytorch_model-57-of-84.pt", "yalm.transformer.layers.54.mlp.dense_ffn_gate.weight": "pytorch_model-57-of-84.pt", "yalm.transformer.layers.54.mlp.dense_ffn_output.weight": "pytorch_model-57-of-84.pt", "yalm.transformer.layers.54.attention.query_key_value.bias": "pytorch_model-57-of-84.pt", "yalm.transformer.layers.54.attention.dense.bias": "pytorch_model-57-of-84.pt", "yalm.transformer.layers.54.post_attention_layernorm.weight": "pytorch_model-57-of-84.pt", "yalm.transformer.layers.54.post_attention_layernorm.bias": "pytorch_model-57-of-84.pt", "yalm.transformer.layers.54.mlp.dense_ffn_hidden.bias": "pytorch_model-57-of-84.pt", "yalm.transformer.layers.54.mlp.dense_ffn_gate.bias": "pytorch_model-57-of-84.pt", "yalm.transformer.layers.54.mlp.dense_ffn_output.bias": "pytorch_model-57-of-84.pt", "yalm.transformer.layers.54.input_layernorm.weight": "pytorch_model-57-of-84.pt", "yalm.transformer.layers.54.input_layernorm.bias": "pytorch_model-57-of-84.pt", "yalm.transformer.layers.55.attention.query_key_value.weight": "pytorch_model-58-of-84.pt", "yalm.transformer.layers.55.attention.dense.weight": "pytorch_model-58-of-84.pt", "yalm.transformer.layers.55.mlp.dense_ffn_hidden.weight": "pytorch_model-58-of-84.pt", "yalm.transformer.layers.55.mlp.dense_ffn_gate.weight": "pytorch_model-58-of-84.pt", "yalm.transformer.layers.55.mlp.dense_ffn_output.weight": "pytorch_model-58-of-84.pt", "yalm.transformer.layers.55.attention.query_key_value.bias": "pytorch_model-58-of-84.pt", "yalm.transformer.layers.55.attention.dense.bias": "pytorch_model-58-of-84.pt", "yalm.transformer.layers.55.post_attention_layernorm.weight": "pytorch_model-58-of-84.pt", "yalm.transformer.layers.55.post_attention_layernorm.bias": "pytorch_model-58-of-84.pt", "yalm.transformer.layers.55.mlp.dense_ffn_hidden.bias": "pytorch_model-58-of-84.pt", "yalm.transformer.layers.55.mlp.dense_ffn_gate.bias": "pytorch_model-58-of-84.pt", "yalm.transformer.layers.55.mlp.dense_ffn_output.bias": "pytorch_model-58-of-84.pt", 
"yalm.transformer.layers.55.input_layernorm.weight": "pytorch_model-58-of-84.pt", "yalm.transformer.layers.55.input_layernorm.bias": "pytorch_model-58-of-84.pt", "yalm.transformer.layers.56.attention.query_key_value.weight": "pytorch_model-59-of-84.pt", "yalm.transformer.layers.56.attention.dense.weight": "pytorch_model-59-of-84.pt", "yalm.transformer.layers.56.mlp.dense_ffn_hidden.weight": "pytorch_model-59-of-84.pt", "yalm.transformer.layers.56.mlp.dense_ffn_gate.weight": "pytorch_model-59-of-84.pt", "yalm.transformer.layers.56.mlp.dense_ffn_output.weight": "pytorch_model-59-of-84.pt", "yalm.transformer.layers.56.attention.query_key_value.bias": "pytorch_model-59-of-84.pt", "yalm.transformer.layers.56.attention.dense.bias": "pytorch_model-59-of-84.pt", "yalm.transformer.layers.56.post_attention_layernorm.weight": "pytorch_model-59-of-84.pt", "yalm.transformer.layers.56.post_attention_layernorm.bias": "pytorch_model-59-of-84.pt", "yalm.transformer.layers.56.mlp.dense_ffn_hidden.bias": "pytorch_model-59-of-84.pt", "yalm.transformer.layers.56.mlp.dense_ffn_gate.bias": "pytorch_model-59-of-84.pt", "yalm.transformer.layers.56.mlp.dense_ffn_output.bias": "pytorch_model-59-of-84.pt", "yalm.transformer.layers.56.input_layernorm.weight": "pytorch_model-59-of-84.pt", "yalm.transformer.layers.56.input_layernorm.bias": "pytorch_model-59-of-84.pt", "yalm.transformer.layers.57.attention.query_key_value.weight": "pytorch_model-60-of-84.pt", "yalm.transformer.layers.57.attention.dense.weight": "pytorch_model-60-of-84.pt", "yalm.transformer.layers.57.mlp.dense_ffn_hidden.weight": "pytorch_model-60-of-84.pt", "yalm.transformer.layers.57.mlp.dense_ffn_gate.weight": "pytorch_model-60-of-84.pt", "yalm.transformer.layers.57.mlp.dense_ffn_output.weight": "pytorch_model-60-of-84.pt", "yalm.transformer.layers.57.attention.query_key_value.bias": "pytorch_model-60-of-84.pt", "yalm.transformer.layers.57.attention.dense.bias": "pytorch_model-60-of-84.pt", "yalm.transformer.layers.57.post_attention_layernorm.weight": "pytorch_model-60-of-84.pt", "yalm.transformer.layers.57.post_attention_layernorm.bias": "pytorch_model-60-of-84.pt", "yalm.transformer.layers.57.mlp.dense_ffn_hidden.bias": "pytorch_model-60-of-84.pt", "yalm.transformer.layers.57.mlp.dense_ffn_gate.bias": "pytorch_model-60-of-84.pt", "yalm.transformer.layers.57.mlp.dense_ffn_output.bias": "pytorch_model-60-of-84.pt", "yalm.transformer.layers.57.input_layernorm.weight": "pytorch_model-60-of-84.pt", "yalm.transformer.layers.57.input_layernorm.bias": "pytorch_model-60-of-84.pt", "yalm.transformer.layers.58.attention.query_key_value.weight": "pytorch_model-61-of-84.pt", "yalm.transformer.layers.58.attention.dense.weight": "pytorch_model-61-of-84.pt", "yalm.transformer.layers.58.mlp.dense_ffn_hidden.weight": "pytorch_model-61-of-84.pt", "yalm.transformer.layers.58.mlp.dense_ffn_gate.weight": "pytorch_model-61-of-84.pt", "yalm.transformer.layers.58.mlp.dense_ffn_output.weight": "pytorch_model-61-of-84.pt", "yalm.transformer.layers.58.attention.query_key_value.bias": "pytorch_model-61-of-84.pt", "yalm.transformer.layers.58.attention.dense.bias": "pytorch_model-61-of-84.pt", "yalm.transformer.layers.58.post_attention_layernorm.weight": "pytorch_model-61-of-84.pt", "yalm.transformer.layers.58.post_attention_layernorm.bias": "pytorch_model-61-of-84.pt", "yalm.transformer.layers.58.mlp.dense_ffn_hidden.bias": "pytorch_model-61-of-84.pt", "yalm.transformer.layers.58.mlp.dense_ffn_gate.bias": "pytorch_model-61-of-84.pt", 
"yalm.transformer.layers.58.mlp.dense_ffn_output.bias": "pytorch_model-61-of-84.pt", "yalm.transformer.layers.58.input_layernorm.weight": "pytorch_model-61-of-84.pt", "yalm.transformer.layers.58.input_layernorm.bias": "pytorch_model-61-of-84.pt", "yalm.transformer.layers.59.attention.query_key_value.weight": "pytorch_model-62-of-84.pt", "yalm.transformer.layers.59.attention.dense.weight": "pytorch_model-62-of-84.pt", "yalm.transformer.layers.59.mlp.dense_ffn_hidden.weight": "pytorch_model-62-of-84.pt", "yalm.transformer.layers.59.mlp.dense_ffn_gate.weight": "pytorch_model-62-of-84.pt", "yalm.transformer.layers.59.mlp.dense_ffn_output.weight": "pytorch_model-62-of-84.pt", "yalm.transformer.layers.59.attention.query_key_value.bias": "pytorch_model-62-of-84.pt", "yalm.transformer.layers.59.attention.dense.bias": "pytorch_model-62-of-84.pt", "yalm.transformer.layers.59.post_attention_layernorm.weight": "pytorch_model-62-of-84.pt", "yalm.transformer.layers.59.post_attention_layernorm.bias": "pytorch_model-62-of-84.pt", "yalm.transformer.layers.59.mlp.dense_ffn_hidden.bias": "pytorch_model-62-of-84.pt", "yalm.transformer.layers.59.mlp.dense_ffn_gate.bias": "pytorch_model-62-of-84.pt", "yalm.transformer.layers.59.mlp.dense_ffn_output.bias": "pytorch_model-62-of-84.pt", "yalm.transformer.layers.59.input_layernorm.weight": "pytorch_model-62-of-84.pt", "yalm.transformer.layers.59.input_layernorm.bias": "pytorch_model-62-of-84.pt", "yalm.transformer.layers.60.attention.query_key_value.weight": "pytorch_model-63-of-84.pt", "yalm.transformer.layers.60.attention.dense.weight": "pytorch_model-63-of-84.pt", "yalm.transformer.layers.60.mlp.dense_ffn_hidden.weight": "pytorch_model-63-of-84.pt", "yalm.transformer.layers.60.mlp.dense_ffn_gate.weight": "pytorch_model-63-of-84.pt", "yalm.transformer.layers.60.mlp.dense_ffn_output.weight": "pytorch_model-63-of-84.pt", "yalm.transformer.layers.60.attention.query_key_value.bias": "pytorch_model-63-of-84.pt", "yalm.transformer.layers.60.attention.dense.bias": "pytorch_model-63-of-84.pt", "yalm.transformer.layers.60.post_attention_layernorm.weight": "pytorch_model-63-of-84.pt", "yalm.transformer.layers.60.post_attention_layernorm.bias": "pytorch_model-63-of-84.pt", "yalm.transformer.layers.60.mlp.dense_ffn_hidden.bias": "pytorch_model-63-of-84.pt", "yalm.transformer.layers.60.mlp.dense_ffn_gate.bias": "pytorch_model-63-of-84.pt", "yalm.transformer.layers.60.mlp.dense_ffn_output.bias": "pytorch_model-63-of-84.pt", "yalm.transformer.layers.60.input_layernorm.weight": "pytorch_model-63-of-84.pt", "yalm.transformer.layers.60.input_layernorm.bias": "pytorch_model-63-of-84.pt", "yalm.transformer.layers.61.attention.query_key_value.weight": "pytorch_model-64-of-84.pt", "yalm.transformer.layers.61.attention.dense.weight": "pytorch_model-64-of-84.pt", "yalm.transformer.layers.61.mlp.dense_ffn_hidden.weight": "pytorch_model-64-of-84.pt", "yalm.transformer.layers.61.mlp.dense_ffn_gate.weight": "pytorch_model-64-of-84.pt", "yalm.transformer.layers.61.mlp.dense_ffn_output.weight": "pytorch_model-64-of-84.pt", "yalm.transformer.layers.61.attention.query_key_value.bias": "pytorch_model-64-of-84.pt", "yalm.transformer.layers.61.attention.dense.bias": "pytorch_model-64-of-84.pt", "yalm.transformer.layers.61.post_attention_layernorm.weight": "pytorch_model-64-of-84.pt", "yalm.transformer.layers.61.post_attention_layernorm.bias": "pytorch_model-64-of-84.pt", "yalm.transformer.layers.61.mlp.dense_ffn_hidden.bias": "pytorch_model-64-of-84.pt", 
"yalm.transformer.layers.61.mlp.dense_ffn_gate.bias": "pytorch_model-64-of-84.pt", "yalm.transformer.layers.61.mlp.dense_ffn_output.bias": "pytorch_model-64-of-84.pt", "yalm.transformer.layers.61.input_layernorm.weight": "pytorch_model-64-of-84.pt", "yalm.transformer.layers.61.input_layernorm.bias": "pytorch_model-64-of-84.pt", "yalm.transformer.layers.62.attention.query_key_value.weight": "pytorch_model-65-of-84.pt", "yalm.transformer.layers.62.attention.dense.weight": "pytorch_model-65-of-84.pt", "yalm.transformer.layers.62.mlp.dense_ffn_hidden.weight": "pytorch_model-65-of-84.pt", "yalm.transformer.layers.62.mlp.dense_ffn_gate.weight": "pytorch_model-65-of-84.pt", "yalm.transformer.layers.62.mlp.dense_ffn_output.weight": "pytorch_model-65-of-84.pt", "yalm.transformer.layers.62.attention.query_key_value.bias": "pytorch_model-65-of-84.pt", "yalm.transformer.layers.62.attention.dense.bias": "pytorch_model-65-of-84.pt", "yalm.transformer.layers.62.post_attention_layernorm.weight": "pytorch_model-65-of-84.pt", "yalm.transformer.layers.62.post_attention_layernorm.bias": "pytorch_model-65-of-84.pt", "yalm.transformer.layers.62.mlp.dense_ffn_hidden.bias": "pytorch_model-65-of-84.pt", "yalm.transformer.layers.62.mlp.dense_ffn_gate.bias": "pytorch_model-65-of-84.pt", "yalm.transformer.layers.62.mlp.dense_ffn_output.bias": "pytorch_model-65-of-84.pt", "yalm.transformer.layers.62.input_layernorm.weight": "pytorch_model-65-of-84.pt", "yalm.transformer.layers.62.input_layernorm.bias": "pytorch_model-65-of-84.pt", "yalm.transformer.layers.63.attention.query_key_value.weight": "pytorch_model-66-of-84.pt", "yalm.transformer.layers.63.attention.dense.weight": "pytorch_model-66-of-84.pt", "yalm.transformer.layers.63.mlp.dense_ffn_hidden.weight": "pytorch_model-66-of-84.pt", "yalm.transformer.layers.63.mlp.dense_ffn_gate.weight": "pytorch_model-66-of-84.pt", "yalm.transformer.layers.63.mlp.dense_ffn_output.weight": "pytorch_model-66-of-84.pt", "yalm.transformer.layers.63.attention.query_key_value.bias": "pytorch_model-66-of-84.pt", "yalm.transformer.layers.63.attention.dense.bias": "pytorch_model-66-of-84.pt", "yalm.transformer.layers.63.post_attention_layernorm.weight": "pytorch_model-66-of-84.pt", "yalm.transformer.layers.63.post_attention_layernorm.bias": "pytorch_model-66-of-84.pt", "yalm.transformer.layers.63.mlp.dense_ffn_hidden.bias": "pytorch_model-66-of-84.pt", "yalm.transformer.layers.63.mlp.dense_ffn_gate.bias": "pytorch_model-66-of-84.pt", "yalm.transformer.layers.63.mlp.dense_ffn_output.bias": "pytorch_model-66-of-84.pt", "yalm.transformer.layers.63.input_layernorm.weight": "pytorch_model-66-of-84.pt", "yalm.transformer.layers.63.input_layernorm.bias": "pytorch_model-66-of-84.pt", "yalm.transformer.layers.64.attention.query_key_value.weight": "pytorch_model-67-of-84.pt", "yalm.transformer.layers.64.attention.dense.weight": "pytorch_model-67-of-84.pt", "yalm.transformer.layers.64.mlp.dense_ffn_hidden.weight": "pytorch_model-67-of-84.pt", "yalm.transformer.layers.64.mlp.dense_ffn_gate.weight": "pytorch_model-67-of-84.pt", "yalm.transformer.layers.64.mlp.dense_ffn_output.weight": "pytorch_model-67-of-84.pt", "yalm.transformer.layers.64.attention.query_key_value.bias": "pytorch_model-67-of-84.pt", "yalm.transformer.layers.64.attention.dense.bias": "pytorch_model-67-of-84.pt", "yalm.transformer.layers.64.post_attention_layernorm.weight": "pytorch_model-67-of-84.pt", "yalm.transformer.layers.64.post_attention_layernorm.bias": "pytorch_model-67-of-84.pt", 
"yalm.transformer.layers.64.mlp.dense_ffn_hidden.bias": "pytorch_model-67-of-84.pt", "yalm.transformer.layers.64.mlp.dense_ffn_gate.bias": "pytorch_model-67-of-84.pt", "yalm.transformer.layers.64.mlp.dense_ffn_output.bias": "pytorch_model-67-of-84.pt", "yalm.transformer.layers.64.input_layernorm.weight": "pytorch_model-67-of-84.pt", "yalm.transformer.layers.64.input_layernorm.bias": "pytorch_model-67-of-84.pt", "yalm.transformer.layers.65.attention.query_key_value.weight": "pytorch_model-68-of-84.pt", "yalm.transformer.layers.65.attention.dense.weight": "pytorch_model-68-of-84.pt", "yalm.transformer.layers.65.mlp.dense_ffn_hidden.weight": "pytorch_model-68-of-84.pt", "yalm.transformer.layers.65.mlp.dense_ffn_gate.weight": "pytorch_model-68-of-84.pt", "yalm.transformer.layers.65.mlp.dense_ffn_output.weight": "pytorch_model-68-of-84.pt", "yalm.transformer.layers.65.attention.query_key_value.bias": "pytorch_model-68-of-84.pt", "yalm.transformer.layers.65.attention.dense.bias": "pytorch_model-68-of-84.pt", "yalm.transformer.layers.65.post_attention_layernorm.weight": "pytorch_model-68-of-84.pt", "yalm.transformer.layers.65.post_attention_layernorm.bias": "pytorch_model-68-of-84.pt", "yalm.transformer.layers.65.mlp.dense_ffn_hidden.bias": "pytorch_model-68-of-84.pt", "yalm.transformer.layers.65.mlp.dense_ffn_gate.bias": "pytorch_model-68-of-84.pt", "yalm.transformer.layers.65.mlp.dense_ffn_output.bias": "pytorch_model-68-of-84.pt", "yalm.transformer.layers.65.input_layernorm.weight": "pytorch_model-68-of-84.pt", "yalm.transformer.layers.65.input_layernorm.bias": "pytorch_model-68-of-84.pt", "yalm.transformer.layers.66.attention.query_key_value.weight": "pytorch_model-69-of-84.pt", "yalm.transformer.layers.66.attention.dense.weight": "pytorch_model-69-of-84.pt", "yalm.transformer.layers.66.mlp.dense_ffn_hidden.weight": "pytorch_model-69-of-84.pt", "yalm.transformer.layers.66.mlp.dense_ffn_gate.weight": "pytorch_model-69-of-84.pt", "yalm.transformer.layers.66.mlp.dense_ffn_output.weight": "pytorch_model-69-of-84.pt", "yalm.transformer.layers.66.attention.query_key_value.bias": "pytorch_model-69-of-84.pt", "yalm.transformer.layers.66.attention.dense.bias": "pytorch_model-69-of-84.pt", "yalm.transformer.layers.66.post_attention_layernorm.weight": "pytorch_model-69-of-84.pt", "yalm.transformer.layers.66.post_attention_layernorm.bias": "pytorch_model-69-of-84.pt", "yalm.transformer.layers.66.mlp.dense_ffn_hidden.bias": "pytorch_model-69-of-84.pt", "yalm.transformer.layers.66.mlp.dense_ffn_gate.bias": "pytorch_model-69-of-84.pt", "yalm.transformer.layers.66.mlp.dense_ffn_output.bias": "pytorch_model-69-of-84.pt", "yalm.transformer.layers.66.input_layernorm.weight": "pytorch_model-69-of-84.pt", "yalm.transformer.layers.66.input_layernorm.bias": "pytorch_model-69-of-84.pt", "yalm.transformer.layers.67.attention.query_key_value.weight": "pytorch_model-70-of-84.pt", "yalm.transformer.layers.67.attention.dense.weight": "pytorch_model-70-of-84.pt", "yalm.transformer.layers.67.mlp.dense_ffn_hidden.weight": "pytorch_model-70-of-84.pt", "yalm.transformer.layers.67.mlp.dense_ffn_gate.weight": "pytorch_model-70-of-84.pt", "yalm.transformer.layers.67.mlp.dense_ffn_output.weight": "pytorch_model-70-of-84.pt", "yalm.transformer.layers.67.attention.query_key_value.bias": "pytorch_model-70-of-84.pt", "yalm.transformer.layers.67.attention.dense.bias": "pytorch_model-70-of-84.pt", "yalm.transformer.layers.67.post_attention_layernorm.weight": "pytorch_model-70-of-84.pt", 
"yalm.transformer.layers.67.post_attention_layernorm.bias": "pytorch_model-70-of-84.pt", "yalm.transformer.layers.67.mlp.dense_ffn_hidden.bias": "pytorch_model-70-of-84.pt", "yalm.transformer.layers.67.mlp.dense_ffn_gate.bias": "pytorch_model-70-of-84.pt", "yalm.transformer.layers.67.mlp.dense_ffn_output.bias": "pytorch_model-70-of-84.pt", "yalm.transformer.layers.67.input_layernorm.weight": "pytorch_model-70-of-84.pt", "yalm.transformer.layers.67.input_layernorm.bias": "pytorch_model-70-of-84.pt", "yalm.transformer.layers.68.attention.query_key_value.weight": "pytorch_model-71-of-84.pt", "yalm.transformer.layers.68.attention.dense.weight": "pytorch_model-71-of-84.pt", "yalm.transformer.layers.68.mlp.dense_ffn_hidden.weight": "pytorch_model-71-of-84.pt", "yalm.transformer.layers.68.mlp.dense_ffn_gate.weight": "pytorch_model-71-of-84.pt", "yalm.transformer.layers.68.mlp.dense_ffn_output.weight": "pytorch_model-71-of-84.pt", "yalm.transformer.layers.68.attention.query_key_value.bias": "pytorch_model-71-of-84.pt", "yalm.transformer.layers.68.attention.dense.bias": "pytorch_model-71-of-84.pt", "yalm.transformer.layers.68.post_attention_layernorm.weight": "pytorch_model-71-of-84.pt", "yalm.transformer.layers.68.post_attention_layernorm.bias": "pytorch_model-71-of-84.pt", "yalm.transformer.layers.68.mlp.dense_ffn_hidden.bias": "pytorch_model-71-of-84.pt", "yalm.transformer.layers.68.mlp.dense_ffn_gate.bias": "pytorch_model-71-of-84.pt", "yalm.transformer.layers.68.mlp.dense_ffn_output.bias": "pytorch_model-71-of-84.pt", "yalm.transformer.layers.68.input_layernorm.weight": "pytorch_model-71-of-84.pt", "yalm.transformer.layers.68.input_layernorm.bias": "pytorch_model-71-of-84.pt", "yalm.transformer.layers.69.attention.query_key_value.weight": "pytorch_model-72-of-84.pt", "yalm.transformer.layers.69.attention.dense.weight": "pytorch_model-72-of-84.pt", "yalm.transformer.layers.69.mlp.dense_ffn_hidden.weight": "pytorch_model-72-of-84.pt", "yalm.transformer.layers.69.mlp.dense_ffn_gate.weight": "pytorch_model-72-of-84.pt", "yalm.transformer.layers.69.mlp.dense_ffn_output.weight": "pytorch_model-72-of-84.pt", "yalm.transformer.layers.69.attention.query_key_value.bias": "pytorch_model-72-of-84.pt", "yalm.transformer.layers.69.attention.dense.bias": "pytorch_model-72-of-84.pt", "yalm.transformer.layers.69.post_attention_layernorm.weight": "pytorch_model-72-of-84.pt", "yalm.transformer.layers.69.post_attention_layernorm.bias": "pytorch_model-72-of-84.pt", "yalm.transformer.layers.69.mlp.dense_ffn_hidden.bias": "pytorch_model-72-of-84.pt", "yalm.transformer.layers.69.mlp.dense_ffn_gate.bias": "pytorch_model-72-of-84.pt", "yalm.transformer.layers.69.mlp.dense_ffn_output.bias": "pytorch_model-72-of-84.pt", "yalm.transformer.layers.69.input_layernorm.weight": "pytorch_model-72-of-84.pt", "yalm.transformer.layers.69.input_layernorm.bias": "pytorch_model-72-of-84.pt", "yalm.transformer.layers.70.attention.query_key_value.weight": "pytorch_model-73-of-84.pt", "yalm.transformer.layers.70.attention.dense.weight": "pytorch_model-73-of-84.pt", "yalm.transformer.layers.70.mlp.dense_ffn_hidden.weight": "pytorch_model-73-of-84.pt", "yalm.transformer.layers.70.mlp.dense_ffn_gate.weight": "pytorch_model-73-of-84.pt", "yalm.transformer.layers.70.mlp.dense_ffn_output.weight": "pytorch_model-73-of-84.pt", "yalm.transformer.layers.70.attention.query_key_value.bias": "pytorch_model-73-of-84.pt", "yalm.transformer.layers.70.attention.dense.bias": "pytorch_model-73-of-84.pt", 
"yalm.transformer.layers.70.post_attention_layernorm.weight": "pytorch_model-73-of-84.pt", "yalm.transformer.layers.70.post_attention_layernorm.bias": "pytorch_model-73-of-84.pt", "yalm.transformer.layers.70.mlp.dense_ffn_hidden.bias": "pytorch_model-73-of-84.pt", "yalm.transformer.layers.70.mlp.dense_ffn_gate.bias": "pytorch_model-73-of-84.pt", "yalm.transformer.layers.70.mlp.dense_ffn_output.bias": "pytorch_model-73-of-84.pt", "yalm.transformer.layers.70.input_layernorm.weight": "pytorch_model-73-of-84.pt", "yalm.transformer.layers.70.input_layernorm.bias": "pytorch_model-73-of-84.pt", "yalm.transformer.layers.71.attention.query_key_value.weight": "pytorch_model-74-of-84.pt", "yalm.transformer.layers.71.attention.dense.weight": "pytorch_model-74-of-84.pt", "yalm.transformer.layers.71.mlp.dense_ffn_hidden.weight": "pytorch_model-74-of-84.pt", "yalm.transformer.layers.71.mlp.dense_ffn_gate.weight": "pytorch_model-74-of-84.pt", "yalm.transformer.layers.71.mlp.dense_ffn_output.weight": "pytorch_model-74-of-84.pt", "yalm.transformer.layers.71.attention.query_key_value.bias": "pytorch_model-74-of-84.pt", "yalm.transformer.layers.71.attention.dense.bias": "pytorch_model-74-of-84.pt", "yalm.transformer.layers.71.post_attention_layernorm.weight": "pytorch_model-74-of-84.pt", "yalm.transformer.layers.71.post_attention_layernorm.bias": "pytorch_model-74-of-84.pt", "yalm.transformer.layers.71.mlp.dense_ffn_hidden.bias": "pytorch_model-74-of-84.pt", "yalm.transformer.layers.71.mlp.dense_ffn_gate.bias": "pytorch_model-74-of-84.pt", "yalm.transformer.layers.71.mlp.dense_ffn_output.bias": "pytorch_model-74-of-84.pt", "yalm.transformer.layers.71.input_layernorm.weight": "pytorch_model-74-of-84.pt", "yalm.transformer.layers.71.input_layernorm.bias": "pytorch_model-74-of-84.pt", "yalm.transformer.layers.72.attention.query_key_value.weight": "pytorch_model-75-of-84.pt", "yalm.transformer.layers.72.attention.dense.weight": "pytorch_model-75-of-84.pt", "yalm.transformer.layers.72.mlp.dense_ffn_hidden.weight": "pytorch_model-75-of-84.pt", "yalm.transformer.layers.72.mlp.dense_ffn_gate.weight": "pytorch_model-75-of-84.pt", "yalm.transformer.layers.72.mlp.dense_ffn_output.weight": "pytorch_model-75-of-84.pt", "yalm.transformer.layers.72.attention.query_key_value.bias": "pytorch_model-75-of-84.pt", "yalm.transformer.layers.72.attention.dense.bias": "pytorch_model-75-of-84.pt", "yalm.transformer.layers.72.post_attention_layernorm.weight": "pytorch_model-75-of-84.pt", "yalm.transformer.layers.72.post_attention_layernorm.bias": "pytorch_model-75-of-84.pt", "yalm.transformer.layers.72.mlp.dense_ffn_hidden.bias": "pytorch_model-75-of-84.pt", "yalm.transformer.layers.72.mlp.dense_ffn_gate.bias": "pytorch_model-75-of-84.pt", "yalm.transformer.layers.72.mlp.dense_ffn_output.bias": "pytorch_model-75-of-84.pt", "yalm.transformer.layers.72.input_layernorm.weight": "pytorch_model-75-of-84.pt", "yalm.transformer.layers.72.input_layernorm.bias": "pytorch_model-75-of-84.pt", "yalm.transformer.layers.73.attention.query_key_value.weight": "pytorch_model-76-of-84.pt", "yalm.transformer.layers.73.attention.dense.weight": "pytorch_model-76-of-84.pt", "yalm.transformer.layers.73.mlp.dense_ffn_hidden.weight": "pytorch_model-76-of-84.pt", "yalm.transformer.layers.73.mlp.dense_ffn_gate.weight": "pytorch_model-76-of-84.pt", "yalm.transformer.layers.73.mlp.dense_ffn_output.weight": "pytorch_model-76-of-84.pt", "yalm.transformer.layers.73.attention.query_key_value.bias": "pytorch_model-76-of-84.pt", 
"yalm.transformer.layers.73.attention.dense.bias": "pytorch_model-76-of-84.pt", "yalm.transformer.layers.73.post_attention_layernorm.weight": "pytorch_model-76-of-84.pt", "yalm.transformer.layers.73.post_attention_layernorm.bias": "pytorch_model-76-of-84.pt", "yalm.transformer.layers.73.mlp.dense_ffn_hidden.bias": "pytorch_model-76-of-84.pt", "yalm.transformer.layers.73.mlp.dense_ffn_gate.bias": "pytorch_model-76-of-84.pt", "yalm.transformer.layers.73.mlp.dense_ffn_output.bias": "pytorch_model-76-of-84.pt", "yalm.transformer.layers.73.input_layernorm.weight": "pytorch_model-76-of-84.pt", "yalm.transformer.layers.73.input_layernorm.bias": "pytorch_model-76-of-84.pt", "yalm.transformer.layers.74.attention.query_key_value.weight": "pytorch_model-77-of-84.pt", "yalm.transformer.layers.74.attention.dense.weight": "pytorch_model-77-of-84.pt", "yalm.transformer.layers.74.mlp.dense_ffn_hidden.weight": "pytorch_model-77-of-84.pt", "yalm.transformer.layers.74.mlp.dense_ffn_gate.weight": "pytorch_model-77-of-84.pt", "yalm.transformer.layers.74.mlp.dense_ffn_output.weight": "pytorch_model-77-of-84.pt", "yalm.transformer.layers.74.attention.query_key_value.bias": "pytorch_model-77-of-84.pt", "yalm.transformer.layers.74.attention.dense.bias": "pytorch_model-77-of-84.pt", "yalm.transformer.layers.74.post_attention_layernorm.weight": "pytorch_model-77-of-84.pt", "yalm.transformer.layers.74.post_attention_layernorm.bias": "pytorch_model-77-of-84.pt", "yalm.transformer.layers.74.mlp.dense_ffn_hidden.bias": "pytorch_model-77-of-84.pt", "yalm.transformer.layers.74.mlp.dense_ffn_gate.bias": "pytorch_model-77-of-84.pt", "yalm.transformer.layers.74.mlp.dense_ffn_output.bias": "pytorch_model-77-of-84.pt", "yalm.transformer.layers.74.input_layernorm.weight": "pytorch_model-77-of-84.pt", "yalm.transformer.layers.74.input_layernorm.bias": "pytorch_model-77-of-84.pt", "yalm.transformer.layers.75.attention.query_key_value.weight": "pytorch_model-78-of-84.pt", "yalm.transformer.layers.75.attention.dense.weight": "pytorch_model-78-of-84.pt", "yalm.transformer.layers.75.mlp.dense_ffn_hidden.weight": "pytorch_model-78-of-84.pt", "yalm.transformer.layers.75.mlp.dense_ffn_gate.weight": "pytorch_model-78-of-84.pt", "yalm.transformer.layers.75.mlp.dense_ffn_output.weight": "pytorch_model-78-of-84.pt", "yalm.transformer.layers.75.attention.query_key_value.bias": "pytorch_model-78-of-84.pt", "yalm.transformer.layers.75.attention.dense.bias": "pytorch_model-78-of-84.pt", "yalm.transformer.layers.75.post_attention_layernorm.weight": "pytorch_model-78-of-84.pt", "yalm.transformer.layers.75.post_attention_layernorm.bias": "pytorch_model-78-of-84.pt", "yalm.transformer.layers.75.mlp.dense_ffn_hidden.bias": "pytorch_model-78-of-84.pt", "yalm.transformer.layers.75.mlp.dense_ffn_gate.bias": "pytorch_model-78-of-84.pt", "yalm.transformer.layers.75.mlp.dense_ffn_output.bias": "pytorch_model-78-of-84.pt", "yalm.transformer.layers.75.input_layernorm.weight": "pytorch_model-78-of-84.pt", "yalm.transformer.layers.75.input_layernorm.bias": "pytorch_model-78-of-84.pt", "yalm.transformer.layers.76.attention.query_key_value.weight": "pytorch_model-79-of-84.pt", "yalm.transformer.layers.76.attention.dense.weight": "pytorch_model-79-of-84.pt", "yalm.transformer.layers.76.mlp.dense_ffn_hidden.weight": "pytorch_model-79-of-84.pt", "yalm.transformer.layers.76.mlp.dense_ffn_gate.weight": "pytorch_model-79-of-84.pt", "yalm.transformer.layers.76.mlp.dense_ffn_output.weight": "pytorch_model-79-of-84.pt", 
"yalm.transformer.layers.76.attention.query_key_value.bias": "pytorch_model-79-of-84.pt", "yalm.transformer.layers.76.attention.dense.bias": "pytorch_model-79-of-84.pt", "yalm.transformer.layers.76.post_attention_layernorm.weight": "pytorch_model-79-of-84.pt", "yalm.transformer.layers.76.post_attention_layernorm.bias": "pytorch_model-79-of-84.pt", "yalm.transformer.layers.76.mlp.dense_ffn_hidden.bias": "pytorch_model-79-of-84.pt", "yalm.transformer.layers.76.mlp.dense_ffn_gate.bias": "pytorch_model-79-of-84.pt", "yalm.transformer.layers.76.mlp.dense_ffn_output.bias": "pytorch_model-79-of-84.pt", "yalm.transformer.layers.76.input_layernorm.weight": "pytorch_model-79-of-84.pt", "yalm.transformer.layers.76.input_layernorm.bias": "pytorch_model-79-of-84.pt", "yalm.transformer.layers.77.attention.query_key_value.weight": "pytorch_model-80-of-84.pt", "yalm.transformer.layers.77.attention.dense.weight": "pytorch_model-80-of-84.pt", "yalm.transformer.layers.77.mlp.dense_ffn_hidden.weight": "pytorch_model-80-of-84.pt", "yalm.transformer.layers.77.mlp.dense_ffn_gate.weight": "pytorch_model-80-of-84.pt", "yalm.transformer.layers.77.mlp.dense_ffn_output.weight": "pytorch_model-80-of-84.pt", "yalm.transformer.layers.77.attention.query_key_value.bias": "pytorch_model-80-of-84.pt", "yalm.transformer.layers.77.attention.dense.bias": "pytorch_model-80-of-84.pt", "yalm.transformer.layers.77.post_attention_layernorm.weight": "pytorch_model-80-of-84.pt", "yalm.transformer.layers.77.post_attention_layernorm.bias": "pytorch_model-80-of-84.pt", "yalm.transformer.layers.77.mlp.dense_ffn_hidden.bias": "pytorch_model-80-of-84.pt", "yalm.transformer.layers.77.mlp.dense_ffn_gate.bias": "pytorch_model-80-of-84.pt", "yalm.transformer.layers.77.mlp.dense_ffn_output.bias": "pytorch_model-80-of-84.pt", "yalm.transformer.layers.77.input_layernorm.weight": "pytorch_model-80-of-84.pt", "yalm.transformer.layers.77.input_layernorm.bias": "pytorch_model-80-of-84.pt", "yalm.transformer.layers.78.attention.query_key_value.weight": "pytorch_model-81-of-84.pt", "yalm.transformer.layers.78.attention.dense.weight": "pytorch_model-81-of-84.pt", "yalm.transformer.layers.78.mlp.dense_ffn_hidden.weight": "pytorch_model-81-of-84.pt", "yalm.transformer.layers.78.mlp.dense_ffn_gate.weight": "pytorch_model-81-of-84.pt", "yalm.transformer.layers.78.mlp.dense_ffn_output.weight": "pytorch_model-81-of-84.pt", "yalm.transformer.layers.78.attention.query_key_value.bias": "pytorch_model-81-of-84.pt", "yalm.transformer.layers.78.attention.dense.bias": "pytorch_model-81-of-84.pt", "yalm.transformer.layers.78.post_attention_layernorm.weight": "pytorch_model-81-of-84.pt", "yalm.transformer.layers.78.post_attention_layernorm.bias": "pytorch_model-81-of-84.pt", "yalm.transformer.layers.78.mlp.dense_ffn_hidden.bias": "pytorch_model-81-of-84.pt", "yalm.transformer.layers.78.mlp.dense_ffn_gate.bias": "pytorch_model-81-of-84.pt", "yalm.transformer.layers.78.mlp.dense_ffn_output.bias": "pytorch_model-81-of-84.pt", "yalm.transformer.layers.78.input_layernorm.weight": "pytorch_model-81-of-84.pt", "yalm.transformer.layers.78.input_layernorm.bias": "pytorch_model-81-of-84.pt", "yalm.transformer.layers.79.attention.query_key_value.weight": "pytorch_model-82-of-84.pt", "yalm.transformer.layers.79.attention.dense.weight": "pytorch_model-82-of-84.pt", "yalm.transformer.layers.79.mlp.dense_ffn_hidden.weight": "pytorch_model-82-of-84.pt", "yalm.transformer.layers.79.mlp.dense_ffn_gate.weight": "pytorch_model-82-of-84.pt", 
"yalm.transformer.layers.79.mlp.dense_ffn_output.weight": "pytorch_model-82-of-84.pt", "yalm.transformer.layers.79.attention.query_key_value.bias": "pytorch_model-82-of-84.pt", "yalm.transformer.layers.79.attention.dense.bias": "pytorch_model-82-of-84.pt", "yalm.transformer.layers.79.post_attention_layernorm.weight": "pytorch_model-82-of-84.pt", "yalm.transformer.layers.79.post_attention_layernorm.bias": "pytorch_model-82-of-84.pt", "yalm.transformer.layers.79.mlp.dense_ffn_hidden.bias": "pytorch_model-82-of-84.pt", "yalm.transformer.layers.79.mlp.dense_ffn_gate.bias": "pytorch_model-82-of-84.pt", "yalm.transformer.layers.79.mlp.dense_ffn_output.bias": "pytorch_model-82-of-84.pt", "yalm.transformer.layers.79.input_layernorm.weight": "pytorch_model-82-of-84.pt", "yalm.transformer.layers.79.input_layernorm.bias": "pytorch_model-82-of-84.pt", "yalm.output_layer.input_layer_norm.weight": "pytorch_model-84-of-84.pt", "yalm.output_layer.input_layer_norm.bias": "pytorch_model-84-of-84.pt", "yalm.output_layer.dense.weight": "pytorch_model-84-of-84.pt", "yalm.output_layer.dense.bias": "pytorch_model-84-of-84.pt", "yalm.output_layer.output_layer_norm.weight": "pytorch_model-84-of-84.pt", "yalm.output_layer.output_layer_norm.bias": "pytorch_model-84-of-84.pt", "out_bias": "pytorch_model-84-of-84.pt"}}