---
# mergekit merge configuration (SLERP method).
#
# NOTE(review): slerp interpolates between exactly two models, but only the
# base model is listed below (models[0] == base_model, which merges the model
# with itself). A second model must be added to `models:` for this config to
# do anything — confirm which model was intended.
models:
  - model: 01-ai/Yi-1.5-34B-Chat
    parameters:
      # NOTE(review): `weight` applies to linear/task-arithmetic merges, not
      # slerp (which is driven by `t` below) — likely a leftover; verify.
      weight: 1.0

merge_method: slerp
base_model: 01-ai/Yi-1.5-34B-Chat
dtype: bfloat16

# Take the tokenizer from an explicit model reference rather than the base.
tokenizer_source: model:Qwen/Qwen-tokenizer

parameters:
  # Interpolation factor per layer group: t=0 keeps the base model at the
  # input and output layers, rising to t=1 (pure second model) in the middle
  # layers.
  # NOTE(review): the original comment referenced "Hermes" and "WizardMath",
  # which do not match the models listed here — it appears copy-pasted from
  # the mergekit README example; confirm the intended models and curve.
  t: [0, 0.5, 1, 0.5, 0]