{
  "model_type": "multiscale_transformer",
  "architectures": [
    "MultiScaleForCausalLM"
  ],
  "vocab_size": 258,
  "d_model": 1024,
  "n_heads": 16,
  "d_ff": 2752,
  "n_layers_per_scale": 6,
  "n_cross_attn_layers": 1,
  "max_seq_len": 256,
  "dropout": 0.0,
  "bias": false,
  "rope_theta": 10000.0,
  "downsample_factors": [
    1,
    2,
    4
  ],
  "num_parameters": 304948224,
  "training_results": {
    "model": "AXL-Code-1B",
    "params": 317590528,
    "steps": 1012,
    "time": 1801.6977367401123,
    "final_loss": 2.939126491546631,
    "perplexity": 31.22,
    "gguf_size_mb": 606.3,
    "context_window": 256,
    "note": "Trained with vanilla SGD (no momentum) due to RAM constraints. Original target 978M params, trained at 318M."
  }
}