diff --git a/L2L_fineweb-100b_N2.3e08_D3.4e09_C4.6e18/model.safetensors b/L2L_fineweb-100b_N2.3e08_D3.4e09_C4.6e18/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..54a85d2a7d5f59eaa28e9005424d65742c9c747d
--- /dev/null
+++ b/L2L_fineweb-100b_N2.3e08_D3.4e09_C4.6e18/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0838877f30315784777def59b462ac699bc14b78e8da7c9cbb574175f26f67a1
+size 909559448
diff --git a/L2L_fineweb-100b_N4.2e08_D4.0e09_C1.0e19/README.md b/L2L_fineweb-100b_N4.2e08_D4.0e09_C1.0e19/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_fineweb-100b_N4.2e08_D4.0e09_C1.0e19/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_fineweb-100b_N4.2e08_D4.0e09_C1.0e19/config.json b/L2L_fineweb-100b_N4.2e08_D4.0e09_C1.0e19/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..ccc1be91ae6e3a3a5db6d7bbc9f81e132ab878d0
--- /dev/null
+++ b/L2L_fineweb-100b_N4.2e08_D4.0e09_C1.0e19/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 1216,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 19,
+ "n_kv_heads": null,
+ "n_layers": 19,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_fineweb-100b_N9.0e07_D8.1e08_C4.4e17/README.md b/L2L_fineweb-100b_N9.0e07_D8.1e08_C4.4e17/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_fineweb-100b_N9.0e07_D8.1e08_C4.4e17/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_fineweb-100b_N9.0e07_D8.1e08_C4.4e17/config.json b/L2L_fineweb-100b_N9.0e07_D8.1e08_C4.4e17/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..305c6219e9d60aa18eab0d74691079e359ce6816
--- /dev/null
+++ b/L2L_fineweb-100b_N9.0e07_D8.1e08_C4.4e17/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 640,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 10,
+ "n_kv_heads": null,
+ "n_layers": 10,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_fineweb-edu-100b_N1.9e08_D1.9e10_C2.2e19/README.md b/L2L_fineweb-edu-100b_N1.9e08_D1.9e10_C2.2e19/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_fineweb-edu-100b_N1.9e08_D1.9e10_C2.2e19/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_fineweb-edu-100b_N1.9e08_D1.9e10_C2.2e19/config.json b/L2L_fineweb-edu-100b_N1.9e08_D1.9e10_C2.2e19/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f728e6ee6c2b2cc150965adb6bf78b3f7d1f19e1
--- /dev/null
+++ b/L2L_fineweb-edu-100b_N1.9e08_D1.9e10_C2.2e19/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 896,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 14,
+ "n_kv_heads": null,
+ "n_layers": 14,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_fineweb-edu-100b_N2.3e08_D3.4e09_C4.6e18/model.safetensors b/L2L_fineweb-edu-100b_N2.3e08_D3.4e09_C4.6e18/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..aeb8019d39bf3c422409efda094b0fc9365c3e67
--- /dev/null
+++ b/L2L_fineweb-edu-100b_N2.3e08_D3.4e09_C4.6e18/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ba8071faa7813ed6fcd1cec61af0c05728951d6c587c3ef1467f4e350a8e7516
+size 909559448
diff --git a/L2L_fineweb-edu-100b_N2.3e08_D7.0e08_C9.6e17/model.safetensors b/L2L_fineweb-edu-100b_N2.3e08_D7.0e08_C9.6e17/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ba78754ea9775c1334fcfe9ba04d4792d798bea7
--- /dev/null
+++ b/L2L_fineweb-edu-100b_N2.3e08_D7.0e08_C9.6e17/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45aa063af3bd7c1dacdd934742d3104882c14be14e080d573c417528121658b8
+size 909559448
diff --git a/L2L_fineweb-edu-100b_N3.1e08_D5.4e09_C1.0e19/README.md b/L2L_fineweb-edu-100b_N3.1e08_D5.4e09_C1.0e19/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_fineweb-edu-100b_N3.1e08_D5.4e09_C1.0e19/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_fineweb-edu-100b_N3.1e08_D5.4e09_C1.0e19/config.json b/L2L_fineweb-edu-100b_N3.1e08_D5.4e09_C1.0e19/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3835200c4d862cef6491b75dd50221796f727638
--- /dev/null
+++ b/L2L_fineweb-edu-100b_N3.1e08_D5.4e09_C1.0e19/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 1088,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 17,
+ "n_kv_heads": null,
+ "n_layers": 17,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_fineweb-edu-100b_N4.8e08_D1.7e10_C4.8e19/README.md b/L2L_fineweb-edu-100b_N4.8e08_D1.7e10_C4.8e19/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_fineweb-edu-100b_N4.8e08_D1.7e10_C4.8e19/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_fineweb-edu-100b_N4.8e08_D1.7e10_C4.8e19/config.json b/L2L_fineweb-edu-100b_N4.8e08_D1.7e10_C4.8e19/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..1ecddeacf8ac1c859c33a3bc4e79e4fbc8a22ae9
--- /dev/null
+++ b/L2L_fineweb-edu-100b_N4.8e08_D1.7e10_C4.8e19/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 1280,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 20,
+ "n_kv_heads": null,
+ "n_layers": 20,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_fineweb-edu-100b_N6.1e08_D6.0e09_C2.2e19/README.md b/L2L_fineweb-edu-100b_N6.1e08_D6.0e09_C2.2e19/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_fineweb-edu-100b_N6.1e08_D6.0e09_C2.2e19/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_fineweb-edu-100b_N6.1e08_D6.0e09_C2.2e19/config.json b/L2L_fineweb-edu-100b_N6.1e08_D6.0e09_C2.2e19/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..bcd719639ab49b75ca716ab2c2986e97b96845f2
--- /dev/null
+++ b/L2L_fineweb-edu-100b_N6.1e08_D6.0e09_C2.2e19/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 1408,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 22,
+ "n_kv_heads": null,
+ "n_layers": 22,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_proof-pile-2_N1.6e08_D2.2e09_C2.1e18/README.md b/L2L_proof-pile-2_N1.6e08_D2.2e09_C2.1e18/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_proof-pile-2_N1.6e08_D2.2e09_C2.1e18/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_proof-pile-2_N1.6e08_D2.2e09_C2.1e18/config.json b/L2L_proof-pile-2_N1.6e08_D2.2e09_C2.1e18/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..eefab0956111af04f5cf1d2f3741df6009e664c0
--- /dev/null
+++ b/L2L_proof-pile-2_N1.6e08_D2.2e09_C2.1e18/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 832,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 13,
+ "n_kv_heads": null,
+ "n_layers": 13,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_proof-pile-2_N2.3e08_D1.5e09_C2.1e18/README.md b/L2L_proof-pile-2_N2.3e08_D1.5e09_C2.1e18/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_proof-pile-2_N2.3e08_D1.5e09_C2.1e18/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_proof-pile-2_N2.3e08_D1.5e09_C2.1e18/config.json b/L2L_proof-pile-2_N2.3e08_D1.5e09_C2.1e18/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..75ecc120927724265e2dea013d0855357144913b
--- /dev/null
+++ b/L2L_proof-pile-2_N2.3e08_D1.5e09_C2.1e18/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 960,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 15,
+ "n_kv_heads": null,
+ "n_layers": 15,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_proof-pile-2_N2.3e08_D3.4e09_C4.6e18/model.safetensors b/L2L_proof-pile-2_N2.3e08_D3.4e09_C4.6e18/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..83e8bb32cb5c0d3e12b1243ba021ca9086d4173c
--- /dev/null
+++ b/L2L_proof-pile-2_N2.3e08_D3.4e09_C4.6e18/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:108ba2683f970703a51c6da27e065d267822c439aa0244bbb236ace5770961fd
+size 909559448
diff --git a/L2L_proof-pile-2_N2.7e08_D1.4e10_C2.2e19/README.md b/L2L_proof-pile-2_N2.7e08_D1.4e10_C2.2e19/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_proof-pile-2_N2.7e08_D1.4e10_C2.2e19/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_proof-pile-2_N2.7e08_D1.4e10_C2.2e19/config.json b/L2L_proof-pile-2_N2.7e08_D1.4e10_C2.2e19/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..13cb78433c2752f971a3e07be8753b97f7077d57
--- /dev/null
+++ b/L2L_proof-pile-2_N2.7e08_D1.4e10_C2.2e19/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 1024,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 16,
+ "n_kv_heads": null,
+ "n_layers": 16,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_proof-pile-2_N3.1e08_D1.1e09_C2.1e18/README.md b/L2L_proof-pile-2_N3.1e08_D1.1e09_C2.1e18/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_proof-pile-2_N3.1e08_D1.1e09_C2.1e18/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_proof-pile-2_N3.1e08_D1.1e09_C2.1e18/config.json b/L2L_proof-pile-2_N3.1e08_D1.1e09_C2.1e18/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3835200c4d862cef6491b75dd50221796f727638
--- /dev/null
+++ b/L2L_proof-pile-2_N3.1e08_D1.1e09_C2.1e18/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 1088,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 17,
+ "n_kv_heads": null,
+ "n_layers": 17,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_proof-pile-2_N3.1e08_D5.4e09_C1.0e19/README.md b/L2L_proof-pile-2_N3.1e08_D5.4e09_C1.0e19/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_proof-pile-2_N3.1e08_D5.4e09_C1.0e19/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_proof-pile-2_N3.1e08_D5.4e09_C1.0e19/config.json b/L2L_proof-pile-2_N3.1e08_D5.4e09_C1.0e19/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3835200c4d862cef6491b75dd50221796f727638
--- /dev/null
+++ b/L2L_proof-pile-2_N3.1e08_D5.4e09_C1.0e19/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 1088,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 17,
+ "n_kv_heads": null,
+ "n_layers": 17,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_proof-pile-2_N5.4e08_D1.4e09_C4.6e18/README.md b/L2L_proof-pile-2_N5.4e08_D1.4e09_C4.6e18/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_proof-pile-2_N5.4e08_D1.4e09_C4.6e18/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_proof-pile-2_N5.4e08_D1.4e09_C4.6e18/config.json b/L2L_proof-pile-2_N5.4e08_D1.4e09_C4.6e18/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..bac9bb4f890ea6cd4a9d8006d807ace3d623f0c0
--- /dev/null
+++ b/L2L_proof-pile-2_N5.4e08_D1.4e09_C4.6e18/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 1344,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 21,
+ "n_kv_heads": null,
+ "n_layers": 21,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_proof-pile-2_N9.0e07_D8.5e09_C4.6e18/model.safetensors b/L2L_proof-pile-2_N9.0e07_D8.5e09_C4.6e18/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..cd478db73d8e8175cb1df7c5a5e7475d09bed36b
--- /dev/null
+++ b/L2L_proof-pile-2_N9.0e07_D8.5e09_C4.6e18/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:26dc77da8f47df1764bddd35ae1c08dd7524360cb5db3893779ca8051e0cacc9
+size 360561776
diff --git a/L2L_proof-pile-2_N9.7e08_D8.3e09_C4.8e19/README.md b/L2L_proof-pile-2_N9.7e08_D8.3e09_C4.8e19/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_proof-pile-2_N9.7e08_D8.3e09_C4.8e19/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_proof-pile-2_N9.7e08_D8.3e09_C4.8e19/config.json b/L2L_proof-pile-2_N9.7e08_D8.3e09_C4.8e19/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..634f954d05ede293355e1f8db2b2eef6c5b0a2df
--- /dev/null
+++ b/L2L_proof-pile-2_N9.7e08_D8.3e09_C4.8e19/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 1664,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 26,
+ "n_kv_heads": null,
+ "n_layers": 26,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_slimpajama-chunk1_N1.1e08_D6.6e08_C4.4e17/README.md b/L2L_slimpajama-chunk1_N1.1e08_D6.6e08_C4.4e17/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_slimpajama-chunk1_N1.1e08_D6.6e08_C4.4e17/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_slimpajama-chunk1_N1.1e08_D6.6e08_C4.4e17/config.json b/L2L_slimpajama-chunk1_N1.1e08_D6.6e08_C4.4e17/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..41baa877f3632c674fd6e77965ab3ee5a2a819e1
--- /dev/null
+++ b/L2L_slimpajama-chunk1_N1.1e08_D6.6e08_C4.4e17/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 704,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 11,
+ "n_kv_heads": null,
+ "n_layers": 11,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_slimpajama-chunk1_N1.6e08_D4.7e09_C4.6e18/README.md b/L2L_slimpajama-chunk1_N1.6e08_D4.7e09_C4.6e18/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_slimpajama-chunk1_N1.6e08_D4.7e09_C4.6e18/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_slimpajama-chunk1_N1.6e08_D4.7e09_C4.6e18/config.json b/L2L_slimpajama-chunk1_N1.6e08_D4.7e09_C4.6e18/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..eefab0956111af04f5cf1d2f3741df6009e664c0
--- /dev/null
+++ b/L2L_slimpajama-chunk1_N1.6e08_D4.7e09_C4.6e18/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 832,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 13,
+ "n_kv_heads": null,
+ "n_layers": 13,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_slimpajama-chunk1_N2.3e08_D3.5e10_C4.8e19/README.md b/L2L_slimpajama-chunk1_N2.3e08_D3.5e10_C4.8e19/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_slimpajama-chunk1_N2.3e08_D3.5e10_C4.8e19/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_slimpajama-chunk1_N2.3e08_D3.5e10_C4.8e19/config.json b/L2L_slimpajama-chunk1_N2.3e08_D3.5e10_C4.8e19/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..75ecc120927724265e2dea013d0855357144913b
--- /dev/null
+++ b/L2L_slimpajama-chunk1_N2.3e08_D3.5e10_C4.8e19/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 960,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 15,
+ "n_kv_heads": null,
+ "n_layers": 15,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_slimpajama-chunk1_N3.1e08_D1.2e10_C2.2e19/README.md b/L2L_slimpajama-chunk1_N3.1e08_D1.2e10_C2.2e19/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_slimpajama-chunk1_N3.1e08_D1.2e10_C2.2e19/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_slimpajama-chunk1_N3.1e08_D1.2e10_C2.2e19/config.json b/L2L_slimpajama-chunk1_N3.1e08_D1.2e10_C2.2e19/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3835200c4d862cef6491b75dd50221796f727638
--- /dev/null
+++ b/L2L_slimpajama-chunk1_N3.1e08_D1.2e10_C2.2e19/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 1088,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 17,
+ "n_kv_heads": null,
+ "n_layers": 17,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_slimpajama-chunk1_N3.6e08_D2.2e10_C4.8e19/README.md b/L2L_slimpajama-chunk1_N3.6e08_D2.2e10_C4.8e19/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_slimpajama-chunk1_N3.6e08_D2.2e10_C4.8e19/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_slimpajama-chunk1_N3.6e08_D2.2e10_C4.8e19/config.json b/L2L_slimpajama-chunk1_N3.6e08_D2.2e10_C4.8e19/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..21ae7419e0c137b5fa021ce7dddedae2872e4ffd
--- /dev/null
+++ b/L2L_slimpajama-chunk1_N3.6e08_D2.2e10_C4.8e19/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 1152,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 18,
+ "n_kv_heads": null,
+ "n_layers": 18,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_slimpajama-chunk1_N3.6e08_D9.7e08_C2.1e18/model.safetensors b/L2L_slimpajama-chunk1_N3.6e08_D9.7e08_C2.1e18/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..7d449755a3aeb6febe6232e629f0509e3d285979
--- /dev/null
+++ b/L2L_slimpajama-chunk1_N3.6e08_D9.7e08_C2.1e18/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:72dee2a574c9636c83358e803342971ef02a951814077773518048922a530f3f
+size 1441882272
diff --git a/L2L_slimpajama-chunk1_N5.8e07_D6.0e09_C2.1e18/README.md b/L2L_slimpajama-chunk1_N5.8e07_D6.0e09_C2.1e18/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_slimpajama-chunk1_N5.8e07_D6.0e09_C2.1e18/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_slimpajama-chunk1_N5.8e07_D6.0e09_C2.1e18/config.json b/L2L_slimpajama-chunk1_N5.8e07_D6.0e09_C2.1e18/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..9edd0fa4cd96d0459cf987588de667ef40cd5c07
--- /dev/null
+++ b/L2L_slimpajama-chunk1_N5.8e07_D6.0e09_C2.1e18/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 512,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 8,
+ "n_kv_heads": null,
+ "n_layers": 8,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_slimpajama-chunk1_N9.0e07_D3.9e09_C2.1e18/README.md b/L2L_slimpajama-chunk1_N9.0e07_D3.9e09_C2.1e18/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_slimpajama-chunk1_N9.0e07_D3.9e09_C2.1e18/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_slimpajama-chunk1_N9.0e07_D3.9e09_C2.1e18/config.json b/L2L_slimpajama-chunk1_N9.0e07_D3.9e09_C2.1e18/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..305c6219e9d60aa18eab0d74691079e359ce6816
--- /dev/null
+++ b/L2L_slimpajama-chunk1_N9.0e07_D3.9e09_C2.1e18/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 640,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 10,
+ "n_kv_heads": null,
+ "n_layers": 10,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_smollm-corpus_N1.3e08_D2.6e09_C2.1e18/README.md b/L2L_smollm-corpus_N1.3e08_D2.6e09_C2.1e18/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_smollm-corpus_N1.3e08_D2.6e09_C2.1e18/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_smollm-corpus_N1.3e08_D2.6e09_C2.1e18/config.json b/L2L_smollm-corpus_N1.3e08_D2.6e09_C2.1e18/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..7fee56c0ab7ba49c7eef3e6cbba1ec901da83d98
--- /dev/null
+++ b/L2L_smollm-corpus_N1.3e08_D2.6e09_C2.1e18/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 768,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 12,
+ "n_kv_heads": null,
+ "n_layers": 12,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_smollm-corpus_N1.3e08_D5.4e08_C4.4e17/README.md b/L2L_smollm-corpus_N1.3e08_D5.4e08_C4.4e17/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_smollm-corpus_N1.3e08_D5.4e08_C4.4e17/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_smollm-corpus_N1.3e08_D5.4e08_C4.4e17/config.json b/L2L_smollm-corpus_N1.3e08_D5.4e08_C4.4e17/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..7fee56c0ab7ba49c7eef3e6cbba1ec901da83d98
--- /dev/null
+++ b/L2L_smollm-corpus_N1.3e08_D5.4e08_C4.4e17/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 768,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 12,
+ "n_kv_heads": null,
+ "n_layers": 12,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_smollm-corpus_N1.9e08_D1.8e09_C2.1e18/README.md b/L2L_smollm-corpus_N1.9e08_D1.8e09_C2.1e18/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_smollm-corpus_N1.9e08_D1.8e09_C2.1e18/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_smollm-corpus_N1.9e08_D1.8e09_C2.1e18/config.json b/L2L_smollm-corpus_N1.9e08_D1.8e09_C2.1e18/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f728e6ee6c2b2cc150965adb6bf78b3f7d1f19e1
--- /dev/null
+++ b/L2L_smollm-corpus_N1.9e08_D1.8e09_C2.1e18/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 896,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 14,
+ "n_kv_heads": null,
+ "n_layers": 14,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_smollm-corpus_N3.1e08_D1.1e09_C2.1e18/README.md b/L2L_smollm-corpus_N3.1e08_D1.1e09_C2.1e18/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_smollm-corpus_N3.1e08_D1.1e09_C2.1e18/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_smollm-corpus_N3.1e08_D1.1e09_C2.1e18/config.json b/L2L_smollm-corpus_N3.1e08_D1.1e09_C2.1e18/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3835200c4d862cef6491b75dd50221796f727638
--- /dev/null
+++ b/L2L_smollm-corpus_N3.1e08_D1.1e09_C2.1e18/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 1088,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 17,
+ "n_kv_heads": null,
+ "n_layers": 17,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_smollm-corpus_N3.1e08_D1.2e10_C2.2e19/README.md b/L2L_smollm-corpus_N3.1e08_D1.2e10_C2.2e19/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_smollm-corpus_N3.1e08_D1.2e10_C2.2e19/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_smollm-corpus_N3.1e08_D1.2e10_C2.2e19/config.json b/L2L_smollm-corpus_N3.1e08_D1.2e10_C2.2e19/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3835200c4d862cef6491b75dd50221796f727638
--- /dev/null
+++ b/L2L_smollm-corpus_N3.1e08_D1.2e10_C2.2e19/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 1088,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 17,
+ "n_kv_heads": null,
+ "n_layers": 17,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_smollm-corpus_N3.5e07_D2.1e09_C4.4e17/README.md b/L2L_smollm-corpus_N3.5e07_D2.1e09_C4.4e17/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_smollm-corpus_N3.5e07_D2.1e09_C4.4e17/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_smollm-corpus_N3.5e07_D2.1e09_C4.4e17/config.json b/L2L_smollm-corpus_N3.5e07_D2.1e09_C4.4e17/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..115dfdf55c2ceb0f1879029732bf838f04e74f21
--- /dev/null
+++ b/L2L_smollm-corpus_N3.5e07_D2.1e09_C4.4e17/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 384,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 6,
+ "n_kv_heads": null,
+ "n_layers": 6,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_smollm-corpus_N3.5e07_D4.5e09_C9.6e17/README.md b/L2L_smollm-corpus_N3.5e07_D4.5e09_C9.6e17/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_smollm-corpus_N3.5e07_D4.5e09_C9.6e17/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_smollm-corpus_N3.5e07_D4.5e09_C9.6e17/config.json b/L2L_smollm-corpus_N3.5e07_D4.5e09_C9.6e17/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..115dfdf55c2ceb0f1879029732bf838f04e74f21
--- /dev/null
+++ b/L2L_smollm-corpus_N3.5e07_D4.5e09_C9.6e17/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 384,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 6,
+ "n_kv_heads": null,
+ "n_layers": 6,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_smollm-corpus_N3.6e08_D2.2e10_C4.8e19/README.md b/L2L_smollm-corpus_N3.6e08_D2.2e10_C4.8e19/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_smollm-corpus_N3.6e08_D2.2e10_C4.8e19/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_smollm-corpus_N3.6e08_D2.2e10_C4.8e19/config.json b/L2L_smollm-corpus_N3.6e08_D2.2e10_C4.8e19/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..21ae7419e0c137b5fa021ce7dddedae2872e4ffd
--- /dev/null
+++ b/L2L_smollm-corpus_N3.6e08_D2.2e10_C4.8e19/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 1152,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 18,
+ "n_kv_heads": null,
+ "n_layers": 18,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_smollm-corpus_N5.4e08_D1.4e09_C4.6e18/README.md b/L2L_smollm-corpus_N5.4e08_D1.4e09_C4.6e18/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_smollm-corpus_N5.4e08_D1.4e09_C4.6e18/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_smollm-corpus_N5.4e08_D1.4e09_C4.6e18/config.json b/L2L_smollm-corpus_N5.4e08_D1.4e09_C4.6e18/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..bac9bb4f890ea6cd4a9d8006d807ace3d623f0c0
--- /dev/null
+++ b/L2L_smollm-corpus_N5.4e08_D1.4e09_C4.6e18/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 1344,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 21,
+ "n_kv_heads": null,
+ "n_layers": 21,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_smollm-corpus_N6.1e08_D1.3e10_C4.8e19/README.md b/L2L_smollm-corpus_N6.1e08_D1.3e10_C4.8e19/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_smollm-corpus_N6.1e08_D1.3e10_C4.8e19/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_smollm-corpus_N6.1e08_D1.3e10_C4.8e19/config.json b/L2L_smollm-corpus_N6.1e08_D1.3e10_C4.8e19/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..bcd719639ab49b75ca716ab2c2986e97b96845f2
--- /dev/null
+++ b/L2L_smollm-corpus_N6.1e08_D1.3e10_C4.8e19/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 1408,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 22,
+ "n_kv_heads": null,
+ "n_layers": 22,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_smollm-corpus_N9.0e07_D1.8e09_C9.6e17/README.md b/L2L_smollm-corpus_N9.0e07_D1.8e09_C9.6e17/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_smollm-corpus_N9.0e07_D1.8e09_C9.6e17/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_smollm-corpus_N9.0e07_D1.8e09_C9.6e17/config.json b/L2L_smollm-corpus_N9.0e07_D1.8e09_C9.6e17/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..305c6219e9d60aa18eab0d74691079e359ce6816
--- /dev/null
+++ b/L2L_smollm-corpus_N9.0e07_D1.8e09_C9.6e17/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 640,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 10,
+ "n_kv_heads": null,
+ "n_layers": 10,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_smollm-corpus_N9.7e08_D3.8e09_C2.2e19/README.md b/L2L_smollm-corpus_N9.7e08_D3.8e09_C2.2e19/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d
--- /dev/null
+++ b/L2L_smollm-corpus_N9.7e08_D3.8e09_C2.2e19/README.md
@@ -0,0 +1,9 @@
+---
+tags:
+- model_hub_mixin
+- pytorch_model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
\ No newline at end of file
diff --git a/L2L_smollm-corpus_N9.7e08_D3.8e09_C2.2e19/config.json b/L2L_smollm-corpus_N9.7e08_D3.8e09_C2.2e19/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..634f954d05ede293355e1f8db2b2eef6c5b0a2df
--- /dev/null
+++ b/L2L_smollm-corpus_N9.7e08_D3.8e09_C2.2e19/config.json
@@ -0,0 +1,39 @@
+{
+ "activation_type": "gelu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "attention_dropout": 0.0,
+ "attention_layer_norm": true,
+ "attention_layer_norm_with_affine": true,
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "clip_qkv": null,
+ "d_model": 1664,
+ "embedding_dropout": 0.0,
+ "embedding_size": 32000,
+ "eos_token_id": 2,
+ "flash_attention": false,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "cpu",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": true,
+ "max_sequence_length": 512,
+ "mlp_hidden_size": null,
+ "mlp_ratio": 4,
+ "multi_query_attention": false,
+ "n_heads": 26,
+ "n_kv_heads": null,
+ "n_layers": 26,
+ "pad_token_id": 2,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "vocab_size": 32000,
+ "weight_tying": false
+}
\ No newline at end of file
diff --git a/L2L_starcoder_N2.3e08_D3.4e09_C4.6e18/model.safetensors b/L2L_starcoder_N2.3e08_D3.4e09_C4.6e18/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..909353a2fed9e8a157745671ccb75946121177d0
--- /dev/null
+++ b/L2L_starcoder_N2.3e08_D3.4e09_C4.6e18/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:222175c1eb6476bdee402f0333dc0e93d70bd52c851b99812222eabaeb6d5fd8
+size 909559448
diff --git a/L2L_starcoder_N2.7e07_D2.7e09_C4.4e17/README.md b/L2L_starcoder_N2.7e07_D2.7e09_C4.4e17/README.md
new file mode 100644
index
0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_starcoder_N2.7e07_D2.7e09_C4.4e17/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_starcoder_N2.7e07_D2.7e09_C4.4e17/config.json b/L2L_starcoder_N2.7e07_D2.7e09_C4.4e17/config.json new file mode 100644 index 0000000000000000000000000000000000000000..a1aca645db4b75b6555f0b3b816941f8466468f5 --- /dev/null +++ b/L2L_starcoder_N2.7e07_D2.7e09_C4.4e17/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 320, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 5, + "n_kv_heads": null, + "n_layers": 5, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_starcoder_N2.7e08_D6.0e08_C9.6e17/README.md b/L2L_starcoder_N2.7e08_D6.0e08_C9.6e17/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_starcoder_N2.7e08_D6.0e08_C9.6e17/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_starcoder_N2.7e08_D6.0e08_C9.6e17/config.json b/L2L_starcoder_N2.7e08_D6.0e08_C9.6e17/config.json new file mode 100644 index 0000000000000000000000000000000000000000..13cb78433c2752f971a3e07be8753b97f7077d57 --- /dev/null +++ b/L2L_starcoder_N2.7e08_D6.0e08_C9.6e17/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 1024, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 16, + "n_kv_heads": null, + 
"n_layers": 16, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_starcoder_N2.7e08_D6.2e09_C1.0e19/config.json b/L2L_starcoder_N2.7e08_D6.2e09_C1.0e19/config.json new file mode 100644 index 0000000000000000000000000000000000000000..13cb78433c2752f971a3e07be8753b97f7077d57 --- /dev/null +++ b/L2L_starcoder_N2.7e08_D6.2e09_C1.0e19/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 1024, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 16, + "n_kv_heads": null, + "n_layers": 16, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_starcoder_N3.1e08_D1.1e09_C2.1e18/README.md b/L2L_starcoder_N3.1e08_D1.1e09_C2.1e18/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_starcoder_N3.1e08_D1.1e09_C2.1e18/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_starcoder_N3.1e08_D1.1e09_C2.1e18/config.json b/L2L_starcoder_N3.1e08_D1.1e09_C2.1e18/config.json new file mode 100644 index 0000000000000000000000000000000000000000..3835200c4d862cef6491b75dd50221796f727638 --- /dev/null +++ b/L2L_starcoder_N3.1e08_D1.1e09_C2.1e18/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 1088, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 17, + "n_kv_heads": null, + "n_layers": 17, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git 
a/L2L_starcoder_N7.3e07_D1.0e10_C4.6e18/README.md b/L2L_starcoder_N7.3e07_D1.0e10_C4.6e18/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_starcoder_N7.3e07_D1.0e10_C4.6e18/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_starcoder_N7.3e07_D1.0e10_C4.6e18/config.json b/L2L_starcoder_N7.3e07_D1.0e10_C4.6e18/config.json new file mode 100644 index 0000000000000000000000000000000000000000..994eabfdc45a26ac580a60c69c715a0aaa9e1eae --- /dev/null +++ b/L2L_starcoder_N7.3e07_D1.0e10_C4.6e18/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 576, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, + "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 9, + "n_kv_heads": null, + "n_layers": 9, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file diff --git a/L2L_starcoder_N9.7e08_D8.3e09_C4.8e19/README.md b/L2L_starcoder_N9.7e08_D8.3e09_C4.8e19/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23bb8239b30ad636a1b592db0346c4753dc5325d --- /dev/null +++ b/L2L_starcoder_N9.7e08_D8.3e09_C4.8e19/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: [More Information Needed] +- Docs: [More Information Needed] \ No newline at end of file diff --git a/L2L_starcoder_N9.7e08_D8.3e09_C4.8e19/config.json b/L2L_starcoder_N9.7e08_D8.3e09_C4.8e19/config.json new file mode 100644 index 0000000000000000000000000000000000000000..634f954d05ede293355e1f8db2b2eef6c5b0a2df --- /dev/null +++ b/L2L_starcoder_N9.7e08_D8.3e09_C4.8e19/config.json @@ -0,0 +1,39 @@ +{ + "activation_type": "gelu", + "alibi": false, + "alibi_bias_max": 8.0, + "attention_dropout": 0.0, + "attention_layer_norm": true, + "attention_layer_norm_with_affine": true, + "bias_for_layer_norm": false, + "block_group_size": 1, + "block_type": "sequential", + "clip_qkv": null, + "d_model": 1664, + "embedding_dropout": 0.0, + "embedding_size": 32000, + "eos_token_id": 2, + "flash_attention": false, + "include_bias": false, + "init_cutoff_factor": null, + "init_device": "cpu", + "init_fn": "mitchell", + "init_std": 0.02, + "layer_norm_type": "default", + "layer_norm_with_affine": true, + "max_sequence_length": 512, 
+ "mlp_hidden_size": null, + "mlp_ratio": 4, + "multi_query_attention": false, + "n_heads": 26, + "n_kv_heads": null, + "n_layers": 26, + "pad_token_id": 2, + "precision": "amp_bf16", + "residual_dropout": 0.0, + "rope": true, + "rope_full_precision": true, + "scale_logits": false, + "vocab_size": 32000, + "weight_tying": false +} \ No newline at end of file