merge_method: linear
parameters:
  weight: 1.0
slices:
  - sources:
      - model: cognitivecomputations/dolphin-2.8-mistral-7b-v02
        layer_range: [0, 1]
      - model: NexusFlow/Starling-LM-7B-beta
        layer_range: [0, 1]
        parameters:
          weight: 0
  - sources:
      - model: cognitivecomputations/dolphin-2.8-mistral-7b-v02
        layer_range: [1, 8]
  - sources:
      - model: NexusFlow/Starling-LM-7B-beta
        layer_range: [4, 12]
  - sources:
      - model: cognitivecomputations/dolphin-2.8-mistral-7b-v02
        layer_range: [8, 16]
  - sources:
      - model: NexusFlow/Starling-LM-7B-beta
        layer_range: [12, 20]
  - sources:
      - model: cognitivecomputations/dolphin-2.8-mistral-7b-v02
        layer_range: [16, 24]
  - sources:
      - model: NexusFlow/Starling-LM-7B-beta
        layer_range: [20, 28]
  - sources:
      - model: cognitivecomputations/dolphin-2.8-mistral-7b-v02
        layer_range: [24, 31]
  - sources:
      - model: cognitivecomputations/dolphin-2.8-mistral-7b-v02
        layer_range: [31, 32]
      - model: NexusFlow/Starling-LM-7B-beta
        layer_range: [31, 32]
        parameters:
          weight: 0
dtype: float16
tokenizer_source: model:cognitivecomputations/dolphin-2.8-mistral-7b-v02
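
The sketch below shows one way to apply a config like this with mergekit's Python API (mergekit also ships a `mergekit-yaml` CLI). The file name `merge-config.yml` and the output directory are placeholders, not part of the config above; treat the option values as illustrative defaults rather than a prescribed setup.

```python
# Minimal sketch: run the merge described by the YAML above with mergekit.
# Assumes mergekit is installed; "merge-config.yml" and the output path are
# placeholders chosen for this example.
import yaml

from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

# Parse the YAML config into mergekit's configuration object.
with open("merge-config.yml", "r", encoding="utf-8") as fp:
    merge_config = MergeConfiguration.model_validate(yaml.safe_load(fp))

# Execute the merge and write the resulting model to disk.
run_merge(
    merge_config,
    out_path="./dolphin-starling-merge",  # placeholder output directory
    options=MergeOptions(
        copy_tokenizer=True,  # carry over the tokenizer named in tokenizer_source
        lazy_unpickle=True,   # lower peak memory while loading model shards
    ),
)
```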