{
  "device": "cuda:0",
  "seed": 42,
  "dtype": "torch.bfloat16",
  "hook_point_in": "blocks.2.ln2.hook_normalized",
  "hook_point_out": "blocks.2.hook_mlp_out",
  "use_decoder_bias": true,
  "apply_decoder_bias_to_pre_encoder": false,
  "expansion_factor": 32,
  "d_model": 4096,
  "d_sae": 131072,
  "bias_init_method": "all_zero",
  "act_fn": "jumprelu",
  "jump_relu_threshold": 0.0250244140625,
  "norm_activation": "dataset-wise",
  "dataset_average_activation_norm": {
    "in": 63.75,
    "out": 1.34375
  },
  "decoder_exactly_fixed_norm": false,
  "sparsity_include_decoder_norm": true,
  "use_glu_encoder": false,
  "init_decoder_norm": 0.5,
  "init_encoder_norm": 0.5,
  "init_encoder_with_decoder_transpose": false,
  "lp": 1,
  "l1_coefficient": 8e-05,
  "l1_coefficient_warmup_steps": 78125,
  "top_k": 50,
  "k_warmup_steps": 78125,
  "use_batch_norm_mse": true,
  "use_ghost_grads": false,
  "tp_size": 1,
  "ddp_size": 1
}
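Note that the dictionary size follows from the width fields above: `d_sae = d_model * expansion_factor = 4096 * 32 = 131072`. The sketch below is a minimal, hypothetical illustration of how two of these settings could be applied at inference time: the `jumprelu` activation with the stored `jump_relu_threshold`, and a "dataset-wise" input rescaling based on `dataset_average_activation_norm`. It is not the training code itself, and the helper names (`jumprelu`, `normalize_input`) are invented for illustration; the exact normalization convention (rescaling activations so their average norm is sqrt(d_model)) is an assumption.

```python
import torch

# Values copied from the config above.
D_MODEL, EXPANSION_FACTOR = 4096, 32
D_SAE = D_MODEL * EXPANSION_FACTOR           # 131072, matches "d_sae"
THRESHOLD = 0.0250244140625                  # "jump_relu_threshold"
NORM_IN = 63.75                              # "dataset_average_activation_norm"["in"]


def jumprelu(pre_acts: torch.Tensor, threshold: float = THRESHOLD) -> torch.Tensor:
    # JumpReLU: keep the pre-activation only where it exceeds the threshold,
    # zero it elsewhere.
    return pre_acts * (pre_acts > threshold)


def normalize_input(x: torch.Tensor, d_model: int = D_MODEL) -> torch.Tensor:
    # Assumed "dataset-wise" normalization: rescale hook_point_in activations so
    # that their dataset-average norm becomes sqrt(d_model).
    return x * (d_model ** 0.5 / NORM_IN)
```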