{ "model_type": "lumenspark", "architectures": ["LumensparkModel"], "vocab_size": 50257, "embed_dim": 768, "auto_map": { "AutoConfig": "anto18671/lumenspark--configuration_lumenspark.LumensparkConfig", "AutoModelForCausalLM": "anto18671/lumenspark--modeling_lumenspark.LumensparkModel" }, "tokenizer_class": "GPT2Tokenizer", "depth": 8, "heads": 12, "seq_length": 768, "dropout": 0.058823529411764705, "k": 384, "rank": 256 }