```yaml
slices:
  - sources:
      - model: ehristoforu/0001lp
        layer_range: [0, 32]
  - sources:
      - model: NeuralNovel/Llama-3-NeuralPaca-8b
        layer_range: [24, 32]
  - sources:
      - model: cognitivecomputations/dolphin-2.9-llama3-8b
        layer_range: [26, 32]
  - sources:
      - model: vicgalle/Configurable-Llama-3-8B-v0.2
        layer_range: [28, 32]
merge_method: passthrough
dtype: bfloat16
```
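The config above stacks layer slices from the listed models with mergekit's `passthrough` method and stores the result in `bfloat16`. Below is a minimal sketch of loading and prompting the resulting merge with `transformers`; the repository id is a hypothetical placeholder, not the actual location of this model.

```python
# Sketch: load the merged checkpoint and run a short generation.
# "your-username/llama3-passthrough-merge" is a hypothetical repo id.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "your-username/llama3-passthrough-merge"  # replace with the real repo id

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.bfloat16,  # matches the dtype set in the merge config
    device_map="auto",
)

prompt = "Summarize what a passthrough layer merge does."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```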