MyLlama-8b / mergekit_config.yml
models:
  - model: NousResearch/Meta-Llama-3-8B-Instruct
    # no parameters necessary for base model
  - model: IlyaGusev/saiga_llama3_8b
    parameters:
      density: 0.5
      weight: 0.5
  - model: Muhammad2003/Llama3-8B-OpenHermes-DPO
    parameters:
      density: 0.5
      weight: 0.5
  - model: ruslanmv/Medical-Llama3-8B
    parameters:
      density: 0.5
      weight: 0.5
  - model: Kukedlc/LLama-3-8b-Maths
    parameters:
      density: 0.5
      weight: 0.5
merge_method: ties
base_model: NousResearch/Meta-Llama-3-8B-Instruct
parameters:
  normalize: true
dtype: bfloat16
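
This config performs a TIES merge: NousResearch/Meta-Llama-3-8B-Instruct is the base, and the four fine-tunes are folded in, each with density 0.5 and weight 0.5; normalize: true rescales the combined task vectors, and the merged weights are written in bfloat16. Below is a minimal sketch of applying the config through mergekit's Python API (MergeConfiguration, MergeOptions, run_merge); the output path and option values are illustrative assumptions, not part of the original config, and the mergekit-yaml command-line entry point accepts the same file directly.

# Sketch only: assumes mergekit is installed and this config file is local.
import yaml
import torch

from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

CONFIG_PATH = "mergekit_config.yml"   # this file
OUTPUT_PATH = "./MyLlama-8b"          # hypothetical output directory

# Parse the YAML above into a validated merge configuration.
with open(CONFIG_PATH, "r", encoding="utf-8") as fp:
    merge_config = MergeConfiguration.model_validate(yaml.safe_load(fp))

# Run the TIES merge and write the merged model to OUTPUT_PATH.
run_merge(
    merge_config,
    OUTPUT_PATH,
    options=MergeOptions(
        cuda=torch.cuda.is_available(),  # merge on GPU if one is available
        copy_tokenizer=True,             # copy the base model's tokenizer to the output
        lazy_unpickle=False,
        low_cpu_memory=False,
    ),
)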