slices:
  - sources:
      - model: "shenzhi-wang/Llama3-8B-Chinese-Chat"
        layer_range: [0, 10]
  - sources:
      - model: "hfl/llama-3-chinese-8b-instruct-v2"
        layer_range: [7, 17]
  - sources:
      - model: "NousResearch/Hermes-2-Pro-Llama-3-8B"
        layer_range: [13, 23]
  - sources:
      - model: "NousResearch/Hermes-2-Pro-Llama-3-8B"
        layer_range: [18, 28]
  - sources:
      - model: "NousResearch/Hermes-2-Pro-Llama-3-8B"
        layer_range: [22, 32]
merge_method: passthrough
base_model: "meta-llama/Meta-Llama-3-8B-Instruct"
dtype: bfloat16
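To execute this passthrough merge, one minimal invocation (assuming mergekit is installed and the configuration above is saved as config.yaml; the file name and output directory here are placeholders) is:

mergekit-yaml config.yaml ./merged-llama3-8b --cuda

The --cuda flag offloads tensor operations to the GPU if one is available; omitting it runs the merge on CPU.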