```yaml
models:
  - model: MediaTek-Research/Breeze-7B-Instruct-v0_1
    # No parameters necessary for base model
  - model: cognitivecomputations/dolphin-2.6-mistral-7b-dpo
    parameters:
      density: 0.5
      weight: 0.5
merge_method: dare_ties
tokenizer_source: base
base_model: MediaTek-Research/Breeze-7B-Instruct-v0_1
parameters:
  #normalize: false
  int8_mask: true
dtype: bfloat16
```
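
As a minimal sketch of how this configuration could be applied, the snippet below invokes the mergekit command-line tool on it. It assumes mergekit is installed (`pip install mergekit`) and that the YAML above has been saved as `config.yaml`; the output directory name is chosen for illustration only.

```python
# Minimal sketch: run the DARE-TIES merge defined above with the mergekit CLI.
# Assumes mergekit is installed and the config is saved as config.yaml;
# "./merged-model" is an illustrative output path, not a required name.
import subprocess

subprocess.run(
    [
        "mergekit-yaml", "config.yaml", "./merged-model",
        "--copy-tokenizer",   # copy the base model's tokenizer into the output
        "--lazy-unpickle",    # reduce peak memory while loading checkpoints
    ],
    check=True,
)
```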