```yaml
base_model: NousResearch/Meta-Llama-3-8B-Instruct
dtype: float32
merge_method: task_arithmetic
parameters:
  normalize: 0.0
slices:
- sources:
  - layer_range: [0, 32]
    model: output/hq_rp
    parameters:
      weight:
      - filter: mlp
        value: 1.15
      - filter: self_attn
        value: 1.025
      - value: 1.0
  - layer_range: [0, 32]
    model: NousResearch/Meta-Llama-3-8B-Instruct
```
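For readers unfamiliar with task arithmetic, the sketch below illustrates the per-tensor update this config describes, assuming the usual formula merged = base + weight * (tuned - base), with the weight chosen by the `filter` rules above (1.15 for MLP tensors, 1.025 for self-attention tensors, 1.0 otherwise). The function name and the substring matching on parameter names are illustrative only, not mergekit's actual internals; the config itself is normally applied with mergekit's `mergekit-yaml` CLI.

```python
# Illustrative sketch (not mergekit's implementation) of the task_arithmetic
# update implied by the config above: merged = base + weight * (tuned - base).
import torch


def merge_tensor(base: torch.Tensor, tuned: torch.Tensor, name: str) -> torch.Tensor:
    # Mirror the filter/weight rules from the config: boost MLP deltas a bit
    # more than attention deltas, and apply everything else at 1.0.
    if "mlp" in name:
        weight = 1.15
    elif "self_attn" in name:
        weight = 1.025
    else:
        weight = 1.0
    return base + weight * (tuned - base)
```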