diff --git a/L2L_fineweb-100b_N1.3e08_D5.4e08_C4.4e17/README.md b/L2L_fineweb-100b_N1.3e08_D5.4e08_C4.4e17/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_fineweb-100b_N1.3e08_D5.4e08_C4.4e17/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_fineweb-100b_N1.3e08_D5.4e08_C4.4e17/config.json b/L2L_fineweb-100b_N1.3e08_D5.4e08_C4.4e17/config.json new file mode 100644 index 0000000000000000000000000000000000000000..7fee56c0ab7ba49c7eef3e6cbba1ec901da83d98 --- /dev/null +++ b/L2L_fineweb-100b_N1.3e08_D5.4e08_C4.4e17/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 768, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 12, + "n_kv_heads": null, + "n_layers": 12, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_fineweb-100b_N2.3e08_D7.3e09_C1.0e19/model.safetensors b/L2L_fineweb-100b_N2.3e08_D7.3e09_C1.0e19/model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..8095149ea0e4c66eef95c78cc1f3ed77719337c6 --- /dev/null +++ b/L2L_fineweb-100b_N2.3e08_D7.3e09_C1.0e19/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61f3aab975e2d712d2f29219b116aed604ad63e9270d76297c040645644c5229 +size 909559448 diff --git a/L2L_fineweb-100b_N3.6e08_D2.1e09_C4.6e18/README.md b/L2L_fineweb-100b_N3.6e08_D2.1e09_C4.6e18/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_fineweb-100b_N3.6e08_D2.1e09_C4.6e18/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_fineweb-100b_N3.6e08_D2.1e09_C4.6e18/config.json b/L2L_fineweb-100b_N3.6e08_D2.1e09_C4.6e18/config.json new file mode 100644 index 0000000000000000000000000000000000000000..21ae7419e0c137b5fa021ce7dddedae2872e4ffd --- /dev/null +++ b/L2L_fineweb-100b_N3.6e08_D2.1e09_C4.6e18/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + 
"attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 1152, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 18, + "n_kv_heads": null, + "n_layers": 18, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_fineweb-100b_N4.2e08_D1.9e10_C4.8e19/README.md b/L2L_fineweb-100b_N4.2e08_D1.9e10_C4.8e19/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_fineweb-100b_N4.2e08_D1.9e10_C4.8e19/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_fineweb-100b_N4.2e08_D1.9e10_C4.8e19/config.json b/L2L_fineweb-100b_N4.2e08_D1.9e10_C4.8e19/config.json new file mode 100644 index 0000000000000000000000000000000000000000..ccc1be91ae6e3a3a5db6d7bbc9f81e132ab878d0 --- /dev/null +++ b/L2L_fineweb-100b_N4.2e08_D1.9e10_C4.8e19/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 1216, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 19, + "n_kv_heads": null, + "n_layers": 19, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_fineweb-100b_N5.8e07_D1.3e09_C4.4e17/README.md b/L2L_fineweb-100b_N5.8e07_D1.3e09_C4.4e17/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_fineweb-100b_N5.8e07_D1.3e09_C4.4e17/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git 
a/L2L_fineweb-100b_N5.8e07_D1.3e09_C4.4e17/config.json b/L2L_fineweb-100b_N5.8e07_D1.3e09_C4.4e17/config.json new file mode 100644 index 0000000000000000000000000000000000000000..9edd0fa4cd96d0459cf987588de667ef40cd5c07 --- /dev/null +++ b/L2L_fineweb-100b_N5.8e07_D1.3e09_C4.4e17/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 512, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 8, + "n_kv_heads": null, + "n_layers": 8, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_fineweb-edu-100b_N1.6e08_D1.0e10_C1.0e19/README.md b/L2L_fineweb-edu-100b_N1.6e08_D1.0e10_C1.0e19/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_fineweb-edu-100b_N1.6e08_D1.0e10_C1.0e19/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_fineweb-edu-100b_N1.6e08_D1.0e10_C1.0e19/config.json b/L2L_fineweb-edu-100b_N1.6e08_D1.0e10_C1.0e19/config.json new file mode 100644 index 0000000000000000000000000000000000000000..eefab0956111af04f5cf1d2f3741df6009e664c0 --- /dev/null +++ b/L2L_fineweb-edu-100b_N1.6e08_D1.0e10_C1.0e19/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 832, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 13, + "n_kv_heads": null, + "n_layers": 13, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_fineweb-edu-100b_N1.7e09_D4.6e09_C4.8e19/config.json b/L2L_fineweb-edu-100b_N1.7e09_D4.6e09_C4.8e19/config.json new file mode 100644 index 0000000000000000000000000000000000000000..6a39bf702d11693743d9cd16fd34dbb6d13bb3ae --- 
/dev/null +++ b/L2L_fineweb-edu-100b_N1.7e09_D4.6e09_C4.8e19/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 2048, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 32, + "n_kv_heads": null, + "n_layers": 32, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_fineweb-edu-100b_N3.1e08_D1.1e09_C2.1e18/README.md b/L2L_fineweb-edu-100b_N3.1e08_D1.1e09_C2.1e18/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_fineweb-edu-100b_N3.1e08_D1.1e09_C2.1e18/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_fineweb-edu-100b_N3.1e08_D1.1e09_C2.1e18/config.json b/L2L_fineweb-edu-100b_N3.1e08_D1.1e09_C2.1e18/config.json new file mode 100644 index 0000000000000000000000000000000000000000..3835200c4d862cef6491b75dd50221796f727638 --- /dev/null +++ b/L2L_fineweb-edu-100b_N3.1e08_D1.1e09_C2.1e18/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 1088, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 17, + "n_kv_heads": null, + "n_layers": 17, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_fineweb-edu-100b_N3.6e08_D1.0e10_C2.2e19/README.md b/L2L_fineweb-edu-100b_N3.6e08_D1.0e10_C2.2e19/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_fineweb-edu-100b_N3.6e08_D1.0e10_C2.2e19/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the 
[PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_fineweb-edu-100b_N3.6e08_D1.0e10_C2.2e19/config.json b/L2L_fineweb-edu-100b_N3.6e08_D1.0e10_C2.2e19/config.json new file mode 100644 index 0000000000000000000000000000000000000000..21ae7419e0c137b5fa021ce7dddedae2872e4ffd --- /dev/null +++ b/L2L_fineweb-edu-100b_N3.6e08_D1.0e10_C2.2e19/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 1152, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 18, + "n_kv_heads": null, + "n_layers": 18, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_fineweb-edu-100b_N3.6e08_D2.1e09_C4.6e18/README.md b/L2L_fineweb-edu-100b_N3.6e08_D2.1e09_C4.6e18/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_fineweb-edu-100b_N3.6e08_D2.1e09_C4.6e18/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_fineweb-edu-100b_N3.6e08_D2.1e09_C4.6e18/config.json b/L2L_fineweb-edu-100b_N3.6e08_D2.1e09_C4.6e18/config.json new file mode 100644 index 0000000000000000000000000000000000000000..21ae7419e0c137b5fa021ce7dddedae2872e4ffd --- /dev/null +++ b/L2L_fineweb-edu-100b_N3.6e08_D2.1e09_C4.6e18/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 1152, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 18, + "n_kv_heads": null, + "n_layers": 18, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No 
newline at end of file diff --git a/L2L_fineweb-edu-100b_N3.6e08_D2.2e10_C4.8e19/README.md b/L2L_fineweb-edu-100b_N3.6e08_D2.2e10_C4.8e19/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_fineweb-edu-100b_N3.6e08_D2.2e10_C4.8e19/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_fineweb-edu-100b_N3.6e08_D2.2e10_C4.8e19/config.json b/L2L_fineweb-edu-100b_N3.6e08_D2.2e10_C4.8e19/config.json new file mode 100644 index 0000000000000000000000000000000000000000..21ae7419e0c137b5fa021ce7dddedae2872e4ffd --- /dev/null +++ b/L2L_fineweb-edu-100b_N3.6e08_D2.2e10_C4.8e19/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 1152, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 18, + "n_kv_heads": null, + "n_layers": 18, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_fineweb-edu-100b_N4.6e07_D7.3e08_C2.0e17/model.safetensors b/L2L_fineweb-edu-100b_N4.6e07_D7.3e08_C2.0e17/model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..c71cf24e4dd44ac6d1b39c363e090389e7d686cf --- /dev/null +++ b/L2L_fineweb-edu-100b_N4.6e07_D7.3e08_C2.0e17/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:904fb262f21ae9191b9429c87d07888be3aa7194cea8147a8433cf09be0cc9c1 +size 182182704 diff --git a/L2L_fineweb-edu-100b_N5.8e07_D5.8e08_C2.0e17/README.md b/L2L_fineweb-edu-100b_N5.8e07_D5.8e08_C2.0e17/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_fineweb-edu-100b_N5.8e07_D5.8e08_C2.0e17/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_fineweb-edu-100b_N5.8e07_D5.8e08_C2.0e17/config.json b/L2L_fineweb-edu-100b_N5.8e07_D5.8e08_C2.0e17/config.json new file mode 100644 index 0000000000000000000000000000000000000000..9edd0fa4cd96d0459cf987588de667ef40cd5c07 --- /dev/null +++ b/L2L_fineweb-edu-100b_N5.8e07_D5.8e08_C2.0e17/config.json @@ -0,0 +1,39 @@ +{ + 
"activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 512, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 8, + "n_kv_heads": null, + "n_layers": 8, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_proof-pile-2_N3.1e08_D2.4e09_C4.6e18/README.md b/L2L_proof-pile-2_N3.1e08_D2.4e09_C4.6e18/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_proof-pile-2_N3.1e08_D2.4e09_C4.6e18/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_proof-pile-2_N3.1e08_D2.4e09_C4.6e18/config.json b/L2L_proof-pile-2_N3.1e08_D2.4e09_C4.6e18/config.json new file mode 100644 index 0000000000000000000000000000000000000000..3835200c4d862cef6491b75dd50221796f727638 --- /dev/null +++ b/L2L_proof-pile-2_N3.1e08_D2.4e09_C4.6e18/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 1088, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 17, + "n_kv_heads": null, + "n_layers": 17, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_proof-pile-2_N4.2e08_D4.0e09_C1.0e19/README.md b/L2L_proof-pile-2_N4.2e08_D4.0e09_C1.0e19/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_proof-pile-2_N4.2e08_D4.0e09_C1.0e19/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More 
Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_proof-pile-2_N4.2e08_D4.0e09_C1.0e19/config.json b/L2L_proof-pile-2_N4.2e08_D4.0e09_C1.0e19/config.json new file mode 100644 index 0000000000000000000000000000000000000000..ccc1be91ae6e3a3a5db6d7bbc9f81e132ab878d0 --- /dev/null +++ b/L2L_proof-pile-2_N4.2e08_D4.0e09_C1.0e19/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 1216, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 19, + "n_kv_heads": null, + "n_layers": 19, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_proof-pile-2_N6.1e08_D1.3e10_C4.8e19/README.md b/L2L_proof-pile-2_N6.1e08_D1.3e10_C4.8e19/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_proof-pile-2_N6.1e08_D1.3e10_C4.8e19/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_proof-pile-2_N6.1e08_D1.3e10_C4.8e19/config.json b/L2L_proof-pile-2_N6.1e08_D1.3e10_C4.8e19/config.json new file mode 100644 index 0000000000000000000000000000000000000000..bcd719639ab49b75ca716ab2c2986e97b96845f2 --- /dev/null +++ b/L2L_proof-pile-2_N6.1e08_D1.3e10_C4.8e19/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 1408, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 22, + "n_kv_heads": null, + "n_layers": 22, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_proof-pile-2_N9.0e07_D3.9e09_C2.1e18/README.md b/L2L_proof-pile-2_N9.0e07_D3.9e09_C2.1e18/README.md new file mode 100644 index 
0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_proof-pile-2_N9.0e07_D3.9e09_C2.1e18/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_proof-pile-2_N9.0e07_D3.9e09_C2.1e18/config.json b/L2L_proof-pile-2_N9.0e07_D3.9e09_C2.1e18/config.json new file mode 100644 index 0000000000000000000000000000000000000000..305c6219e9d60aa18eab0d74691079e359ce6816 --- /dev/null +++ b/L2L_proof-pile-2_N9.0e07_D3.9e09_C2.1e18/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 640, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 10, + "n_kv_heads": null, + "n_layers": 10, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_slimpajama-chunk1_N1.1e08_D1.5e10_C1.0e19/README.md b/L2L_slimpajama-chunk1_N1.1e08_D1.5e10_C1.0e19/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_slimpajama-chunk1_N1.1e08_D1.5e10_C1.0e19/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_slimpajama-chunk1_N1.1e08_D1.5e10_C1.0e19/config.json b/L2L_slimpajama-chunk1_N1.1e08_D1.5e10_C1.0e19/config.json new file mode 100644 index 0000000000000000000000000000000000000000..41baa877f3632c674fd6e77965ab3ee5a2a819e1 --- /dev/null +++ b/L2L_slimpajama-chunk1_N1.1e08_D1.5e10_C1.0e19/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 704, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + 
"multi_query_attention": false, + "n_heads": 11, + "n_kv_heads": null, + "n_layers": 11, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_slimpajama-chunk1_N1.5e09_D5.6e09_C4.8e19/README.md b/L2L_slimpajama-chunk1_N1.5e09_D5.6e09_C4.8e19/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_slimpajama-chunk1_N1.5e09_D5.6e09_C4.8e19/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_slimpajama-chunk1_N1.5e09_D5.6e09_C4.8e19/config.json b/L2L_slimpajama-chunk1_N1.5e09_D5.6e09_C4.8e19/config.json new file mode 100644 index 0000000000000000000000000000000000000000..b0ac0ef6bc85f96b3d24ba507bc08973d74c3d3a --- /dev/null +++ b/L2L_slimpajama-chunk1_N1.5e09_D5.6e09_C4.8e19/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 1920, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 30, + "n_kv_heads": null, + "n_layers": 30, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_slimpajama-chunk1_N1.9e08_D1.8e09_C2.1e18/model.safetensors b/L2L_slimpajama-chunk1_N1.9e08_D1.8e09_C2.1e18/model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..3774824c7aa98205f0bd7b511ea163574bf0ec9e --- /dev/null +++ b/L2L_slimpajama-chunk1_N1.9e08_D1.8e09_C2.1e18/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35fb6905b22f9fdb0d2ae2b462e12c3fce3e5f030a208f80ff5a1852070d41e9 +size 769084968 diff --git a/L2L_slimpajama-chunk1_N2.0e07_D3.7e09_C4.4e17/README.md b/L2L_slimpajama-chunk1_N2.0e07_D3.7e09_C4.4e17/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_slimpajama-chunk1_N2.0e07_D3.7e09_C4.4e17/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git 
a/L2L_slimpajama-chunk1_N2.0e07_D3.7e09_C4.4e17/config.json b/L2L_slimpajama-chunk1_N2.0e07_D3.7e09_C4.4e17/config.json new file mode 100644 index 0000000000000000000000000000000000000000..cacd3da0cfff31756238a0a22f7bd7b455cefe6c --- /dev/null +++ b/L2L_slimpajama-chunk1_N2.0e07_D3.7e09_C4.4e17/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 256, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 4, + "n_kv_heads": null, + "n_layers": 4, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_slimpajama-chunk1_N2.7e08_D6.2e09_C1.0e19/model.safetensors b/L2L_slimpajama-chunk1_N2.7e08_D6.2e09_C1.0e19/model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..a8afea30991c84686557445f21bc983913927f9d --- /dev/null +++ b/L2L_slimpajama-chunk1_N2.7e08_D6.2e09_C1.0e19/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5038bec26ed188d1c819ca5a3dc086acf3985b7fde2c1bf07e591e4643d1450e +size 1067730840 diff --git a/L2L_slimpajama-chunk1_N3.1e08_D2.6e10_C4.8e19/README.md b/L2L_slimpajama-chunk1_N3.1e08_D2.6e10_C4.8e19/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_slimpajama-chunk1_N3.1e08_D2.6e10_C4.8e19/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_slimpajama-chunk1_N3.1e08_D2.6e10_C4.8e19/config.json b/L2L_slimpajama-chunk1_N3.1e08_D2.6e10_C4.8e19/config.json new file mode 100644 index 0000000000000000000000000000000000000000..3835200c4d862cef6491b75dd50221796f727638 --- /dev/null +++ b/L2L_slimpajama-chunk1_N3.1e08_D2.6e10_C4.8e19/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 1088, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 17, + 
"n_kv_heads": null, + "n_layers": 17, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_slimpajama-chunk1_N3.5e07_D2.1e09_C4.4e17/README.md b/L2L_slimpajama-chunk1_N3.5e07_D2.1e09_C4.4e17/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_slimpajama-chunk1_N3.5e07_D2.1e09_C4.4e17/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_slimpajama-chunk1_N3.5e07_D2.1e09_C4.4e17/config.json b/L2L_slimpajama-chunk1_N3.5e07_D2.1e09_C4.4e17/config.json new file mode 100644 index 0000000000000000000000000000000000000000..115dfdf55c2ceb0f1879029732bf838f04e74f21 --- /dev/null +++ b/L2L_slimpajama-chunk1_N3.5e07_D2.1e09_C4.4e17/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 384, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 6, + "n_kv_heads": null, + "n_layers": 6, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_slimpajama-chunk1_N3.6e08_D2.1e09_C4.6e18/README.md b/L2L_slimpajama-chunk1_N3.6e08_D2.1e09_C4.6e18/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_slimpajama-chunk1_N3.6e08_D2.1e09_C4.6e18/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_slimpajama-chunk1_N3.6e08_D2.1e09_C4.6e18/config.json b/L2L_slimpajama-chunk1_N3.6e08_D2.1e09_C4.6e18/config.json new file mode 100644 index 0000000000000000000000000000000000000000..21ae7419e0c137b5fa021ce7dddedae2872e4ffd --- /dev/null +++ b/L2L_slimpajama-chunk1_N3.6e08_D2.1e09_C4.6e18/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": 
"sequential", + "clip_qkv": null, + "d_model": 1152, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 18, + "n_kv_heads": null, + "n_layers": 18, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_slimpajama-chunk1_N3.6e08_D4.6e09_C1.0e19/README.md b/L2L_slimpajama-chunk1_N3.6e08_D4.6e09_C1.0e19/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_slimpajama-chunk1_N3.6e08_D4.6e09_C1.0e19/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_slimpajama-chunk1_N3.6e08_D4.6e09_C1.0e19/config.json b/L2L_slimpajama-chunk1_N3.6e08_D4.6e09_C1.0e19/config.json new file mode 100644 index 0000000000000000000000000000000000000000..21ae7419e0c137b5fa021ce7dddedae2872e4ffd --- /dev/null +++ b/L2L_slimpajama-chunk1_N3.6e08_D4.6e09_C1.0e19/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 1152, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 18, + "n_kv_heads": null, + "n_layers": 18, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_slimpajama-chunk1_N4.2e08_D1.9e10_C4.8e19/README.md b/L2L_slimpajama-chunk1_N4.2e08_D1.9e10_C4.8e19/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_slimpajama-chunk1_N4.2e08_D1.9e10_C4.8e19/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_slimpajama-chunk1_N4.2e08_D1.9e10_C4.8e19/config.json 
b/L2L_slimpajama-chunk1_N4.2e08_D1.9e10_C4.8e19/config.json new file mode 100644 index 0000000000000000000000000000000000000000..ccc1be91ae6e3a3a5db6d7bbc9f81e132ab878d0 --- /dev/null +++ b/L2L_slimpajama-chunk1_N4.2e08_D1.9e10_C4.8e19/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 1216, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 19, + "n_kv_heads": null, + "n_layers": 19, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_slimpajama-chunk1_N4.2e08_D8.4e08_C2.1e18/README.md b/L2L_slimpajama-chunk1_N4.2e08_D8.4e08_C2.1e18/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_slimpajama-chunk1_N4.2e08_D8.4e08_C2.1e18/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_slimpajama-chunk1_N4.2e08_D8.4e08_C2.1e18/config.json b/L2L_slimpajama-chunk1_N4.2e08_D8.4e08_C2.1e18/config.json new file mode 100644 index 0000000000000000000000000000000000000000..ccc1be91ae6e3a3a5db6d7bbc9f81e132ab878d0 --- /dev/null +++ b/L2L_slimpajama-chunk1_N4.2e08_D8.4e08_C2.1e18/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 1216, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 19, + "n_kv_heads": null, + "n_layers": 19, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_slimpajama-chunk1_N4.8e08_D3.5e09_C1.0e19/README.md b/L2L_slimpajama-chunk1_N4.8e08_D3.5e09_C1.0e19/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ 
b/L2L_slimpajama-chunk1_N4.8e08_D3.5e09_C1.0e19/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_slimpajama-chunk1_N4.8e08_D3.5e09_C1.0e19/config.json b/L2L_slimpajama-chunk1_N4.8e08_D3.5e09_C1.0e19/config.json new file mode 100644 index 0000000000000000000000000000000000000000..1ecddeacf8ac1c859c33a3bc4e79e4fbc8a22ae9 --- /dev/null +++ b/L2L_slimpajama-chunk1_N4.8e08_D3.5e09_C1.0e19/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 1280, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 20, + "n_kv_heads": null, + "n_layers": 20, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_slimpajama-chunk1_N6.1e08_D1.3e10_C4.8e19/README.md b/L2L_slimpajama-chunk1_N6.1e08_D1.3e10_C4.8e19/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_slimpajama-chunk1_N6.1e08_D1.3e10_C4.8e19/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_slimpajama-chunk1_N6.1e08_D1.3e10_C4.8e19/config.json b/L2L_slimpajama-chunk1_N6.1e08_D1.3e10_C4.8e19/config.json new file mode 100644 index 0000000000000000000000000000000000000000..bcd719639ab49b75ca716ab2c2986e97b96845f2 --- /dev/null +++ b/L2L_slimpajama-chunk1_N6.1e08_D1.3e10_C4.8e19/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 1408, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 22, + "n_kv_heads": null, + "n_layers": 22, + 
"pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_slimpajama-chunk1_N7.3e07_D2.2e09_C9.6e17/model.safetensors b/L2L_slimpajama-chunk1_N7.3e07_D2.2e09_C9.6e17/model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..5f417b4e5ae57adfb22eabd8caf95789b0051421 --- /dev/null +++ b/L2L_slimpajama-chunk1_N7.3e07_D2.2e09_C9.6e17/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ec79360e58d3029dbfba94c8fbe9c6c652cd5327750afcc1790cb0208db9d99 +size 290876416 diff --git a/L2L_smollm-corpus_N1.3e08_D1.2e09_C9.6e17/README.md b/L2L_smollm-corpus_N1.3e08_D1.2e09_C9.6e17/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_smollm-corpus_N1.3e08_D1.2e09_C9.6e17/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_smollm-corpus_N1.3e08_D1.2e09_C9.6e17/config.json b/L2L_smollm-corpus_N1.3e08_D1.2e09_C9.6e17/config.json new file mode 100644 index 0000000000000000000000000000000000000000..7fee56c0ab7ba49c7eef3e6cbba1ec901da83d98 --- /dev/null +++ b/L2L_smollm-corpus_N1.3e08_D1.2e09_C9.6e17/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 768, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 12, + "n_kv_heads": null, + "n_layers": 12, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_smollm-corpus_N1.3e08_D1.2e10_C1.0e19/README.md b/L2L_smollm-corpus_N1.3e08_D1.2e10_C1.0e19/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_smollm-corpus_N1.3e08_D1.2e10_C1.0e19/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_smollm-corpus_N1.3e08_D1.2e10_C1.0e19/config.json b/L2L_smollm-corpus_N1.3e08_D1.2e10_C1.0e19/config.json new file mode 100644 index 
0000000000000000000000000000000000000000..7fee56c0ab7ba49c7eef3e6cbba1ec901da83d98 --- /dev/null +++ b/L2L_smollm-corpus_N1.3e08_D1.2e10_C1.0e19/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 768, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 12, + "n_kv_heads": null, + "n_layers": 12, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_smollm-corpus_N2.3e08_D3.4e09_C4.6e18/model.safetensors b/L2L_smollm-corpus_N2.3e08_D3.4e09_C4.6e18/model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..05217fa0c79dd82a50bb760481eb012c514893e6 --- /dev/null +++ b/L2L_smollm-corpus_N2.3e08_D3.4e09_C4.6e18/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb3d72bc2bf2370cd3fb84350106cdf1415c03053d7339338793c6c0ba69bdbd +size 909559448 diff --git a/L2L_smollm-corpus_N2.7e07_D2.7e09_C4.4e17/README.md b/L2L_smollm-corpus_N2.7e07_D2.7e09_C4.4e17/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_smollm-corpus_N2.7e07_D2.7e09_C4.4e17/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_smollm-corpus_N2.7e07_D2.7e09_C4.4e17/config.json b/L2L_smollm-corpus_N2.7e07_D2.7e09_C4.4e17/config.json new file mode 100644 index 0000000000000000000000000000000000000000..a1aca645db4b75b6555f0b3b816941f8466468f5 --- /dev/null +++ b/L2L_smollm-corpus_N2.7e07_D2.7e09_C4.4e17/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 320, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 5, + "n_kv_heads": null, + "n_layers": 5, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + 
"vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_smollm-corpus_N3.5e07_D9.5e08_C2.0e17/README.md b/L2L_smollm-corpus_N3.5e07_D9.5e08_C2.0e17/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_smollm-corpus_N3.5e07_D9.5e08_C2.0e17/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_smollm-corpus_N3.5e07_D9.5e08_C2.0e17/config.json b/L2L_smollm-corpus_N3.5e07_D9.5e08_C2.0e17/config.json new file mode 100644 index 0000000000000000000000000000000000000000..115dfdf55c2ceb0f1879029732bf838f04e74f21 --- /dev/null +++ b/L2L_smollm-corpus_N3.5e07_D9.5e08_C2.0e17/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 384, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 6, + "n_kv_heads": null, + "n_layers": 6, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_smollm-corpus_N4.6e07_D7.7e09_C2.1e18/README.md b/L2L_smollm-corpus_N4.6e07_D7.7e09_C2.1e18/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_smollm-corpus_N4.6e07_D7.7e09_C2.1e18/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_smollm-corpus_N4.6e07_D7.7e09_C2.1e18/config.json b/L2L_smollm-corpus_N4.6e07_D7.7e09_C2.1e18/config.json new file mode 100644 index 0000000000000000000000000000000000000000..6ddcaca5a561d50cfa15ad13fe288fd743cf7562 --- /dev/null +++ b/L2L_smollm-corpus_N4.6e07_D7.7e09_C2.1e18/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 448, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + 
"init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 7, + "n_kv_heads": null, + "n_layers": 7, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_smollm-corpus_N4.8e08_D1.6e09_C4.6e18/README.md b/L2L_smollm-corpus_N4.8e08_D1.6e09_C4.6e18/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_smollm-corpus_N4.8e08_D1.6e09_C4.6e18/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_smollm-corpus_N4.8e08_D1.6e09_C4.6e18/config.json b/L2L_smollm-corpus_N4.8e08_D1.6e09_C4.6e18/config.json new file mode 100644 index 0000000000000000000000000000000000000000..1ecddeacf8ac1c859c33a3bc4e79e4fbc8a22ae9 --- /dev/null +++ b/L2L_smollm-corpus_N4.8e08_D1.6e09_C4.6e18/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 1280, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 20, + "n_kv_heads": null, + "n_layers": 20, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_smollm-corpus_N7.3e07_D4.8e09_C2.1e18/README.md b/L2L_smollm-corpus_N7.3e07_D4.8e09_C2.1e18/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_smollm-corpus_N7.3e07_D4.8e09_C2.1e18/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_smollm-corpus_N7.3e07_D4.8e09_C2.1e18/config.json b/L2L_smollm-corpus_N7.3e07_D4.8e09_C2.1e18/config.json new file mode 100644 index 0000000000000000000000000000000000000000..994eabfdc45a26ac580a60c69c715a0aaa9e1eae --- /dev/null +++ b/L2L_smollm-corpus_N7.3e07_D4.8e09_C2.1e18/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + 
"alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 576, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 9, + "n_kv_heads": null, + "n_layers": 9, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_smollm-corpus_N9.0e07_D8.1e08_C4.4e17/README.md b/L2L_smollm-corpus_N9.0e07_D8.1e08_C4.4e17/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_smollm-corpus_N9.0e07_D8.1e08_C4.4e17/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_smollm-corpus_N9.0e07_D8.1e08_C4.4e17/config.json b/L2L_smollm-corpus_N9.0e07_D8.1e08_C4.4e17/config.json new file mode 100644 index 0000000000000000000000000000000000000000..305c6219e9d60aa18eab0d74691079e359ce6816 --- /dev/null +++ b/L2L_smollm-corpus_N9.0e07_D8.1e08_C4.4e17/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 640, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 10, + "n_kv_heads": null, + "n_layers": 10, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_starcoder_N1.3e08_D1.2e09_C9.6e17/README.md b/L2L_starcoder_N1.3e08_D1.2e09_C9.6e17/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_starcoder_N1.3e08_D1.2e09_C9.6e17/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information 
Needed] \ No newline at end of file diff --git a/L2L_starcoder_N1.3e08_D1.2e09_C9.6e17/config.json b/L2L_starcoder_N1.3e08_D1.2e09_C9.6e17/config.json new file mode 100644 index 0000000000000000000000000000000000000000..7fee56c0ab7ba49c7eef3e6cbba1ec901da83d98 --- /dev/null +++ b/L2L_starcoder_N1.3e08_D1.2e09_C9.6e17/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 768, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 12, + "n_kv_heads": null, + "n_layers": 12, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_starcoder_N1.6e08_D4.5e08_C4.4e17/README.md b/L2L_starcoder_N1.6e08_D4.5e08_C4.4e17/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_starcoder_N1.6e08_D4.5e08_C4.4e17/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_starcoder_N1.6e08_D4.5e08_C4.4e17/config.json b/L2L_starcoder_N1.6e08_D4.5e08_C4.4e17/config.json new file mode 100644 index 0000000000000000000000000000000000000000..eefab0956111af04f5cf1d2f3741df6009e664c0 --- /dev/null +++ b/L2L_starcoder_N1.6e08_D4.5e08_C4.4e17/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 832, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 13, + "n_kv_heads": null, + "n_layers": 13, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_starcoder_N2.3e08_D7.0e08_C9.6e17/model.safetensors b/L2L_starcoder_N2.3e08_D7.0e08_C9.6e17/model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..ae92c979f0ed4e880285a133bf873b1df93c3c5a --- /dev/null 
+++ b/L2L_starcoder_N2.3e08_D7.0e08_C9.6e17/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c6cae4a82565c3dbf70b43c175db94c5aa843ce000a11df7fa753650ac01039 +size 909559448 diff --git a/L2L_starcoder_N4.8e08_D1.7e10_C4.8e19/README.md b/L2L_starcoder_N4.8e08_D1.7e10_C4.8e19/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_starcoder_N4.8e08_D1.7e10_C4.8e19/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_starcoder_N4.8e08_D1.7e10_C4.8e19/config.json b/L2L_starcoder_N4.8e08_D1.7e10_C4.8e19/config.json new file mode 100644 index 0000000000000000000000000000000000000000..1ecddeacf8ac1c859c33a3bc4e79e4fbc8a22ae9 --- /dev/null +++ b/L2L_starcoder_N4.8e08_D1.7e10_C4.8e19/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 1280, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 20, + "n_kv_heads": null, + "n_layers": 20, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_starcoder_N5.8e07_D5.8e08_C2.0e17/model.safetensors b/L2L_starcoder_N5.8e07_D5.8e08_C2.0e17/model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..e4550f444cc3f5127be345f31f4a4d4fcf6e0668 --- /dev/null +++ b/L2L_starcoder_N5.8e07_D5.8e08_C2.0e17/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b47d150f7fab863abc9a18152b6a76009c30f68924f95afe5dc6d405c1e1540f +size 231809928 diff --git a/L2L_starcoder_N6.1e08_D2.7e09_C1.0e19/README.md b/L2L_starcoder_N6.1e08_D2.7e09_C1.0e19/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_starcoder_N6.1e08_D2.7e09_C1.0e19/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_starcoder_N6.1e08_D2.7e09_C1.0e19/config.json b/L2L_starcoder_N6.1e08_D2.7e09_C1.0e19/config.json new file mode 100644 index 0000000000000000000000000000000000000000..bcd719639ab49b75ca716ab2c2986e97b96845f2 --- /dev/null +++ 
b/L2L_starcoder_N6.1e08_D2.7e09_C1.0e19/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 1408, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 22, + "n_kv_heads": null, + "n_layers": 22, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file
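The hunks above repeat one pattern per run directory: a mixin-generated README.md, a 39-line config.json, and a git-lfs pointer for model.safetensors. A few properties of these files can be sanity-checked in code. First, the parameter count N encoded in each directory name can be approximated from its config.json. The sketch below assumes an OLMo-style decoder (no biases per `"include_bias": false`, untied input/output embeddings per `"weight_tying": false`, MLP hidden size of `mlp_ratio * d_model` when `"mlp_hidden_size"` is null); it ignores the LayerNorm gains, which contribute only a few times `d_model` per layer.

```python
# Estimate the parameter count implied by a config.json above and compare
# it with the N value in the directory name. An approximation, not the
# training code's own accounting.
import json

def approx_params(cfg: dict) -> int:
    d = cfg["d_model"]
    vocab = cfg["embedding_size"]
    mlp_hidden = cfg["mlp_hidden_size"] or cfg["mlp_ratio"] * d
    # Untied input embedding + output head ("weight_tying": false).
    embed = 2 * vocab * d
    # Per block: Q/K/V/O projections (4*d*d, no biases) plus a
    # two-matrix MLP (d -> hidden and hidden -> d).
    block = 4 * d * d + 2 * d * mlp_hidden
    return embed + cfg["n_layers"] * block

cfg = json.load(open("L2L_smollm-corpus_N1.3e08_D1.2e09_C9.6e17/config.json"))
print(f"{approx_params(cfg):.3e}")  # ~1.341e+08, matching N1.3e08
```

The same arithmetic reproduces the smaller runs too, e.g. the d_model=320, 5-layer config above gives roughly 2.7e7, matching its N2.7e07 name.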
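Second, the directory names encode dataset, parameters N, training tokens D, and compute C (e.g. L2L_starcoder_N4.8e08_D1.7e10_C4.8e19), and the C values are consistent with the usual C ≈ 6·N·D FLOPs approximation. A minimal parser, where the regex is my assumption about the naming scheme rather than anything stated in the diff:

```python
# Parse L2L_{dataset}_N{params}_D{tokens}_C{flops} names and check C ~= 6*N*D.
import re

PATTERN = re.compile(
    r"L2L_(?P<dataset>.+)_N(?P<N>[\d.e+]+)_D(?P<D>[\d.e+]+)_C(?P<C>[\d.e+]+)"
)

for name in [
    "L2L_smollm-corpus_N1.3e08_D1.2e09_C9.6e17",
    "L2L_starcoder_N4.8e08_D1.7e10_C4.8e19",
]:
    m = PATTERN.fullmatch(name)
    n, d, c = (float(m[k]) for k in ("N", "D", "C"))
    print(f"{name}: 6ND = {6 * n * d:.1e} vs C = {c:.1e}")
```

Both checks land within rounding of the stated C (9.4e17 vs 9.6e17, 4.9e19 vs 4.8e19), which suggests the names were generated from the 6ND rule and then rounded to two significant figures.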
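Third, the model.safetensors entries are git-lfs pointers, so only a SHA-256 oid and a byte size appear in the diff. The sizes line up with about 4 bytes per parameter, i.e. fp32 tensors: 909559448 / 4 ≈ 2.27e8 for the N2.3e08 runs, 290876416 / 4 ≈ 7.3e7 for N7.3e07, and 231809928 / 4 ≈ 5.8e7 for N5.8e07. A minimal pointer parser, assuming the standard LFS pointer layout of "key value" lines:

```python
# Parse a git-lfs pointer (as committed above) and infer the fp32 param count.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {"oid": fields["oid"].removeprefix("sha256:"),
            "size": int(fields["size"])}

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:3c6cae4a82565c3dbf70b43c175db94c5aa843ce000a11df7fa753650ac01039
size 909559448"""

info = parse_lfs_pointer(pointer)
print(f"~{info['size'] / 4:.2e} fp32 params")  # ~2.27e+08, i.e. N2.3e08
```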
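Finally, since each run lives in a subfolder of one repository, individual checkpoints can be fetched with huggingface_hub primitives alone. The repo_id below is a placeholder (the diff never names the Hub repository), and because the READMEs leave the defining library as [More Information Needed], this sketch stops at loading raw tensors rather than instantiating a model class:

```python
# Fetch one run's files from a multi-model repo; repo_id is hypothetical.
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

REPO_ID = "some-org/some-repo"  # placeholder; not given in the diff
RUN = "L2L_smollm-corpus_N2.3e08_D3.4e09_C4.6e18"

config_path = hf_hub_download(REPO_ID, "config.json", subfolder=RUN)
weights_path = hf_hub_download(REPO_ID, "model.safetensors", subfolder=RUN)

state_dict = load_file(weights_path)  # tensor name -> torch.Tensor
print(len(state_dict), "tensors")
```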