KempnerInstitute committed on
Commit
8d06f9d
Parent: d28cdb5

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full set.
Files changed (50)
  1. L2L_fineweb-100b_N1.6e08_D4.5e08_C4.4e17/README.md +9 -0
  2. L2L_fineweb-100b_N1.6e08_D4.5e08_C4.4e17/config.json +39 -0
  3. L2L_fineweb-100b_N2.3e08_D3.4e09_C4.6e18/config.json +39 -0
  4. L2L_fineweb-100b_N2.3e08_D7.3e09_C1.0e19/README.md +9 -0
  5. L2L_fineweb-100b_N2.3e08_D7.3e09_C1.0e19/config.json +39 -0
  6. L2L_fineweb-edu-100b_N1.1e08_D6.9e09_C4.6e18/README.md +9 -0
  7. L2L_fineweb-edu-100b_N1.1e08_D6.9e09_C4.6e18/config.json +39 -0
  8. L2L_fineweb-edu-100b_N2.3e08_D3.4e09_C4.6e18/README.md +9 -0
  9. L2L_fineweb-edu-100b_N2.3e08_D3.4e09_C4.6e18/config.json +39 -0
  10. L2L_fineweb-edu-100b_N2.3e08_D3.5e10_C4.8e19/README.md +9 -0
  11. L2L_fineweb-edu-100b_N2.3e08_D3.5e10_C4.8e19/config.json +39 -0
  12. L2L_fineweb-edu-100b_N2.3e08_D7.0e08_C9.6e17/README.md +9 -0
  13. L2L_fineweb-edu-100b_N2.3e08_D7.0e08_C9.6e17/config.json +39 -0
  14. L2L_fineweb-edu-100b_N3.5e07_D4.5e09_C9.6e17/README.md +9 -0
  15. L2L_fineweb-edu-100b_N3.5e07_D4.5e09_C9.6e17/config.json +39 -0
  16. L2L_fineweb-edu-100b_N4.6e07_D7.7e09_C2.1e18/README.md +9 -0
  17. L2L_fineweb-edu-100b_N4.6e07_D7.7e09_C2.1e18/config.json +39 -0
  18. L2L_fineweb-edu-100b_N5.4e08_D1.5e10_C4.8e19/README.md +9 -0
  19. L2L_fineweb-edu-100b_N5.4e08_D1.5e10_C4.8e19/config.json +39 -0
  20. L2L_fineweb-edu-100b_N5.4e08_D6.8e09_C2.2e19/README.md +9 -0
  21. L2L_fineweb-edu-100b_N5.4e08_D6.8e09_C2.2e19/config.json +39 -0
  22. L2L_fineweb-edu-100b_N7.8e08_D2.1e09_C1.0e19/README.md +9 -0
  23. L2L_fineweb-edu-100b_N7.8e08_D2.1e09_C1.0e19/config.json +39 -0
  24. L2L_proof-pile-2_N1.9e08_D8.7e09_C1.0e19/README.md +9 -0
  25. L2L_proof-pile-2_N1.9e08_D8.7e09_C1.0e19/config.json +39 -0
  26. L2L_proof-pile-2_N2.3e08_D3.4e09_C4.6e18/README.md +9 -0
  27. L2L_proof-pile-2_N2.3e08_D3.4e09_C4.6e18/config.json +39 -0
  28. L2L_proof-pile-2_N2.7e07_D1.3e09_C2.0e17/README.md +9 -0
  29. L2L_proof-pile-2_N2.7e07_D1.3e09_C2.0e17/config.json +39 -0
  30. L2L_proof-pile-2_N2.7e08_D1.3e09_C2.1e18/README.md +9 -0
  31. L2L_proof-pile-2_N2.7e08_D1.3e09_C2.1e18/config.json +39 -0
  32. L2L_proof-pile-2_N5.4e08_D1.5e10_C4.8e19/README.md +9 -0
  33. L2L_proof-pile-2_N5.4e08_D1.5e10_C4.8e19/config.json +39 -0
  34. L2L_proof-pile-2_N5.4e08_D3.1e09_C1.0e19/README.md +9 -0
  35. L2L_proof-pile-2_N5.4e08_D3.1e09_C1.0e19/config.json +39 -0
  36. L2L_proof-pile-2_N5.8e07_D2.8e09_C9.6e17/README.md +9 -0
  37. L2L_proof-pile-2_N5.8e07_D2.8e09_C9.6e17/config.json +39 -0
  38. L2L_proof-pile-2_N5.8e07_D6.0e09_C2.1e18/README.md +9 -0
  39. L2L_proof-pile-2_N5.8e07_D6.0e09_C2.1e18/config.json +39 -0
  40. L2L_proof-pile-2_N9.0e07_D1.8e09_C9.6e17/README.md +9 -0
  41. L2L_proof-pile-2_N9.0e07_D1.8e09_C9.6e17/config.json +39 -0
  42. L2L_slimpajama-chunk1_N1.1e08_D1.4e09_C9.6e17/README.md +9 -0
  43. L2L_slimpajama-chunk1_N2.3e08_D1.6e10_C2.2e19/README.md +9 -0
  44. L2L_slimpajama-chunk1_N2.3e08_D1.6e10_C2.2e19/config.json +39 -0
  45. L2L_slimpajama-chunk1_N3.1e08_D2.4e09_C4.6e18/README.md +9 -0
  46. L2L_slimpajama-chunk1_N3.1e08_D2.4e09_C4.6e18/config.json +39 -0
  47. L2L_slimpajama-chunk1_N3.6e08_D9.7e08_C2.1e18/README.md +9 -0
  48. L2L_slimpajama-chunk1_N3.6e08_D9.7e08_C2.1e18/config.json +39 -0
  49. L2L_slimpajama-chunk1_N6.1e08_D1.2e09_C4.6e18/README.md +9 -0
  50. L2L_slimpajama-chunk1_N6.1e08_D1.2e09_C4.6e18/config.json +39 -0
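
Each directory name encodes the training dataset plus three scaling quantities: `N` (parameters), `D` (training tokens), and `C` (training FLOPs). The listed values are consistent with the common `C ≈ 6ND` approximation. Below is a minimal Python sketch of that convention; the `parse_run_name` helper is hypothetical and not part of this repository.

```python
import re

def parse_run_name(name: str) -> dict:
    """Parse a run directory name like
    'L2L_fineweb-100b_N1.6e08_D4.5e08_C4.4e17' into its parts.
    Illustrative helper only; the repo itself does not ship it."""
    m = re.match(
        r"L2L_(?P<dataset>.+)_N(?P<N>[\d.e+]+)_D(?P<D>[\d.e+]+)_C(?P<C>[\d.e+]+)",
        name,
    )
    if m is None:
        raise ValueError(f"unrecognized run name: {name}")
    return {
        "dataset": m.group("dataset"),
        "N": float(m.group("N")),  # model parameters
        "D": float(m.group("D")),  # training tokens
        "C": float(m.group("C")),  # training FLOPs
    }

run = parse_run_name("L2L_fineweb-100b_N1.6e08_D4.5e08_C4.4e17")
# Chinchilla-style approximation: C ~= 6 * N * D
print(run["C"] / (6 * run["N"] * run["D"]))  # ~1.0 if the convention holds
```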
L2L_fineweb-100b_N1.6e08_D4.5e08_C4.4e17/README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - model_hub_mixin
+ - pytorch_model_hub_mixin
+ ---
+
+ This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+ - Library: [More Information Needed]
+ - Docs: [More Information Needed]
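
Every README in this commit is the stock model card generated by the `PyTorchModelHubMixin` push, with `Library` and `Docs` left as `[More Information Needed]`. As a hedged sketch of fetching one of these per-model subfolders with the standard `huggingface_hub` download API (the `REPO_ID` below is a placeholder, not the actual repository id):

```python
import json
from huggingface_hub import hf_hub_download

# Hypothetical repo id; substitute the Hub repository this commit belongs to.
REPO_ID = "KempnerInstitute/<this-repo>"

# Each model sits in its own subfolder, e.g. the first one in this commit.
config_path = hf_hub_download(
    repo_id=REPO_ID,
    filename="config.json",
    subfolder="L2L_fineweb-100b_N1.6e08_D4.5e08_C4.4e17",
)

with open(config_path) as f:
    config = json.load(f)
print(config["d_model"], config["n_layers"], config["n_heads"])  # 832 13 13
```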
L2L_fineweb-100b_N1.6e08_D4.5e08_C4.4e17/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 832,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 13,
+ "n_kv_heads": null,
+ "n_layers": 13,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+ }
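
The config describes an OLMo-style decoder-only transformer (GELU MLP with `mlp_ratio` 4, RoPE, untied embeddings, no biases). The `N` in each directory name can be recovered, to a first approximation, from `d_model`, `n_layers`, and `vocab_size`: each block carries about `12 * d_model^2` weights and the untied input/output embeddings about `2 * vocab_size * d_model`. A rough sketch, ignoring the comparatively tiny layer-norm terms:

```python
def approx_params(d_model: int, n_layers: int, vocab_size: int) -> int:
    """Rough decoder-only parameter count: per block, attention is ~4*d^2
    (Q, K, V, output projections) and the MLP with mlp_ratio=4 is ~8*d^2
    (up and down projections); embeddings are untied, so they count twice."""
    per_block = 4 * d_model**2 + 8 * d_model**2
    embeddings = 2 * vocab_size * d_model
    return n_layers * per_block + embeddings

# Values from L2L_fineweb-100b_N1.6e08_D4.5e08_C4.4e17/config.json
print(approx_params(832, 13, 32000))  # ~1.6e8, matching the N in the name
```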
L2L_fineweb-100b_N2.3e08_D3.4e09_C4.6e18/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 960,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 15,
+ "n_kv_heads": null,
+ "n_layers": 15,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+ }
L2L_fineweb-100b_N2.3e08_D7.3e09_C1.0e19/README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - model_hub_mixin
+ - pytorch_model_hub_mixin
+ ---
+
+ This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+ - Library: [More Information Needed]
+ - Docs: [More Information Needed]
L2L_fineweb-100b_N2.3e08_D7.3e09_C1.0e19/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 960,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 15,
+ "n_kv_heads": null,
+ "n_layers": 15,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+ }
L2L_fineweb-edu-100b_N1.1e08_D6.9e09_C4.6e18/README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - model_hub_mixin
+ - pytorch_model_hub_mixin
+ ---
+
+ This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+ - Library: [More Information Needed]
+ - Docs: [More Information Needed]
L2L_fineweb-edu-100b_N1.1e08_D6.9e09_C4.6e18/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 704,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 11,
+ "n_kv_heads": null,
+ "n_layers": 11,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+ }
L2L_fineweb-edu-100b_N2.3e08_D3.4e09_C4.6e18/README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - model_hub_mixin
+ - pytorch_model_hub_mixin
+ ---
+
+ This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+ - Library: [More Information Needed]
+ - Docs: [More Information Needed]
L2L_fineweb-edu-100b_N2.3e08_D3.4e09_C4.6e18/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 960,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 15,
+ "n_kv_heads": null,
+ "n_layers": 15,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+ }
L2L_fineweb-edu-100b_N2.3e08_D3.5e10_C4.8e19/README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - model_hub_mixin
+ - pytorch_model_hub_mixin
+ ---
+
+ This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+ - Library: [More Information Needed]
+ - Docs: [More Information Needed]
L2L_fineweb-edu-100b_N2.3e08_D3.5e10_C4.8e19/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 960,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 15,
+ "n_kv_heads": null,
+ "n_layers": 15,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+ }
L2L_fineweb-edu-100b_N2.3e08_D7.0e08_C9.6e17/README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - model_hub_mixin
+ - pytorch_model_hub_mixin
+ ---
+
+ This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+ - Library: [More Information Needed]
+ - Docs: [More Information Needed]
L2L_fineweb-edu-100b_N2.3e08_D7.0e08_C9.6e17/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 960,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 15,
+ "n_kv_heads": null,
+ "n_layers": 15,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+ }
L2L_fineweb-edu-100b_N3.5e07_D4.5e09_C9.6e17/README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - model_hub_mixin
+ - pytorch_model_hub_mixin
+ ---
+
+ This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+ - Library: [More Information Needed]
+ - Docs: [More Information Needed]
L2L_fineweb-edu-100b_N3.5e07_D4.5e09_C9.6e17/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 384,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 6,
+ "n_kv_heads": null,
+ "n_layers": 6,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+ }
L2L_fineweb-edu-100b_N4.6e07_D7.7e09_C2.1e18/README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - model_hub_mixin
+ - pytorch_model_hub_mixin
+ ---
+
+ This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+ - Library: [More Information Needed]
+ - Docs: [More Information Needed]
L2L_fineweb-edu-100b_N4.6e07_D7.7e09_C2.1e18/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 448,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 7,
+ "n_kv_heads": null,
+ "n_layers": 7,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+ }
L2L_fineweb-edu-100b_N5.4e08_D1.5e10_C4.8e19/README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - model_hub_mixin
+ - pytorch_model_hub_mixin
+ ---
+
+ This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+ - Library: [More Information Needed]
+ - Docs: [More Information Needed]
L2L_fineweb-edu-100b_N5.4e08_D1.5e10_C4.8e19/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 1344,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 21,
+ "n_kv_heads": null,
+ "n_layers": 21,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+ }
L2L_fineweb-edu-100b_N5.4e08_D6.8e09_C2.2e19/README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - model_hub_mixin
+ - pytorch_model_hub_mixin
+ ---
+
+ This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+ - Library: [More Information Needed]
+ - Docs: [More Information Needed]
L2L_fineweb-edu-100b_N5.4e08_D6.8e09_C2.2e19/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 1344,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 21,
+ "n_kv_heads": null,
+ "n_layers": 21,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+ }
L2L_fineweb-edu-100b_N7.8e08_D2.1e09_C1.0e19/README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - model_hub_mixin
+ - pytorch_model_hub_mixin
+ ---
+
+ This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+ - Library: [More Information Needed]
+ - Docs: [More Information Needed]
L2L_fineweb-edu-100b_N7.8e08_D2.1e09_C1.0e19/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 1536,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 24,
+ "n_kv_heads": null,
+ "n_layers": 24,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+ }
L2L_proof-pile-2_N1.9e08_D8.7e09_C1.0e19/README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - model_hub_mixin
+ - pytorch_model_hub_mixin
+ ---
+
+ This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+ - Library: [More Information Needed]
+ - Docs: [More Information Needed]
L2L_proof-pile-2_N1.9e08_D8.7e09_C1.0e19/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 896,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 14,
+ "n_kv_heads": null,
+ "n_layers": 14,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+ }
L2L_proof-pile-2_N2.3e08_D3.4e09_C4.6e18/README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - model_hub_mixin
+ - pytorch_model_hub_mixin
+ ---
+
+ This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+ - Library: [More Information Needed]
+ - Docs: [More Information Needed]
L2L_proof-pile-2_N2.3e08_D3.4e09_C4.6e18/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 960,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 15,
+ "n_kv_heads": null,
+ "n_layers": 15,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+ }
L2L_proof-pile-2_N2.7e07_D1.3e09_C2.0e17/README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - model_hub_mixin
+ - pytorch_model_hub_mixin
+ ---
+
+ This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+ - Library: [More Information Needed]
+ - Docs: [More Information Needed]
L2L_proof-pile-2_N2.7e07_D1.3e09_C2.0e17/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 320,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 5,
+ "n_kv_heads": null,
+ "n_layers": 5,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+ }
L2L_proof-pile-2_N2.7e08_D1.3e09_C2.1e18/README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - model_hub_mixin
+ - pytorch_model_hub_mixin
+ ---
+
+ This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+ - Library: [More Information Needed]
+ - Docs: [More Information Needed]
L2L_proof-pile-2_N2.7e08_D1.3e09_C2.1e18/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 1024,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 16,
+ "n_kv_heads": null,
+ "n_layers": 16,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+ }
L2L_proof-pile-2_N5.4e08_D1.5e10_C4.8e19/README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - model_hub_mixin
+ - pytorch_model_hub_mixin
+ ---
+
+ This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+ - Library: [More Information Needed]
+ - Docs: [More Information Needed]
L2L_proof-pile-2_N5.4e08_D1.5e10_C4.8e19/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 1344,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 21,
+ "n_kv_heads": null,
+ "n_layers": 21,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+ }
L2L_proof-pile-2_N5.4e08_D3.1e09_C1.0e19/README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - model_hub_mixin
+ - pytorch_model_hub_mixin
+ ---
+
+ This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+ - Library: [More Information Needed]
+ - Docs: [More Information Needed]
L2L_proof-pile-2_N5.4e08_D3.1e09_C1.0e19/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 1344,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 21,
+ "n_kv_heads": null,
+ "n_layers": 21,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+ }
L2L_proof-pile-2_N5.8e07_D2.8e09_C9.6e17/README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - model_hub_mixin
+ - pytorch_model_hub_mixin
+ ---
+
+ This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+ - Library: [More Information Needed]
+ - Docs: [More Information Needed]
L2L_proof-pile-2_N5.8e07_D2.8e09_C9.6e17/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 512,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 8,
+ "n_kv_heads": null,
+ "n_layers": 8,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+ }
L2L_proof-pile-2_N5.8e07_D6.0e09_C2.1e18/README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - model_hub_mixin
+ - pytorch_model_hub_mixin
+ ---
+
+ This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+ - Library: [More Information Needed]
+ - Docs: [More Information Needed]
L2L_proof-pile-2_N5.8e07_D6.0e09_C2.1e18/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 512,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 8,
+ "n_kv_heads": null,
+ "n_layers": 8,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+ }
L2L_proof-pile-2_N9.0e07_D1.8e09_C9.6e17/README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - model_hub_mixin
+ - pytorch_model_hub_mixin
+ ---
+
+ This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+ - Library: [More Information Needed]
+ - Docs: [More Information Needed]
L2L_proof-pile-2_N9.0e07_D1.8e09_C9.6e17/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 640,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 10,
+ "n_kv_heads": null,
+ "n_layers": 10,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+ }
L2L_slimpajama-chunk1_N1.1e08_D1.4e09_C9.6e17/README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - model_hub_mixin
+ - pytorch_model_hub_mixin
+ ---
+
+ This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+ - Library: [More Information Needed]
+ - Docs: [More Information Needed]
L2L_slimpajama-chunk1_N2.3e08_D1.6e10_C2.2e19/README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - model_hub_mixin
+ - pytorch_model_hub_mixin
+ ---
+
+ This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+ - Library: [More Information Needed]
+ - Docs: [More Information Needed]
L2L_slimpajama-chunk1_N2.3e08_D1.6e10_C2.2e19/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 960,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 15,
+ "n_kv_heads": null,
+ "n_layers": 15,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+ }
L2L_slimpajama-chunk1_N3.1e08_D2.4e09_C4.6e18/README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - model_hub_mixin
+ - pytorch_model_hub_mixin
+ ---
+
+ This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+ - Library: [More Information Needed]
+ - Docs: [More Information Needed]
L2L_slimpajama-chunk1_N3.1e08_D2.4e09_C4.6e18/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 1088,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 17,
+ "n_kv_heads": null,
+ "n_layers": 17,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+ }
L2L_slimpajama-chunk1_N3.6e08_D9.7e08_C2.1e18/README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - model_hub_mixin
+ - pytorch_model_hub_mixin
+ ---
+
+ This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+ - Library: [More Information Needed]
+ - Docs: [More Information Needed]
L2L_slimpajama-chunk1_N3.6e08_D9.7e08_C2.1e18/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 1152,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 18,
+ "n_kv_heads": null,
+ "n_layers": 18,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+ }
L2L_slimpajama-chunk1_N6.1e08_D1.2e09_C4.6e18/README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - model_hub_mixin
+ - pytorch_model_hub_mixin
+ ---
+
+ This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+ - Library: [More Information Needed]
+ - Docs: [More Information Needed]
L2L_slimpajama-chunk1_N6.1e08_D1.2e09_C4.6e18/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 1408,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 22,
+ "n_kv_heads": null,
+ "n_layers": 22,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+ }