---
# mergekit configuration: DARE-TIES merge of Llama-3-8B fine-tunes.
# Each entry lists a donor model with its sparsification density and
# relative weight; the base model contributes no delta of its own.
models:
  - model: NousResearch/Meta-Llama-3-8B
    # No parameters necessary for base model
  - model: NousResearch/Meta-Llama-3-8B-Instruct
    parameters:
      density: 0.6
      weight: 2
  - model: Weyaxi/Einstein-v6.1-Llama3-8B
    parameters:
      density: 0.55
      weight: 2
  - model: cognitivecomputations/dolphin-2.9-llama3-8b
    parameters:
      density: 0.55
      weight: 2
  - model: nvidia/Llama3-ChatQA-1.5-8B
    parameters:
      density: 0.55
      weight: 2
  - model: Kukedlc/SmartLlama-3-8B-MS-v0.1
    parameters:
      density: 0.66
      weight: 1
  - model: mlabonne/ChimeraLlama-3-8B-v3
    parameters:
      density: 0.66
      weight: 1

# DARE-TIES: drop-and-rescale deltas, then TIES sign-consensus merge.
merge_method: dare_ties
base_model: NousResearch/Meta-Llama-3-8B
parameters:
  # Mask int8 quantization buffers during the merge.
  int8_mask: true
dtype: float16