# mergekit configuration for SnowyRP-13B-L2.
# Three documents: two intermediate task-arithmetic merges (P1, P2),
# then a final TIES merge that combines them with EstopianOrcaMaid.

# --- Stage 1: SnowyRP-13B-L2-P1 ---
# Task-arithmetic merge of RP/storywriting deltas onto the Llama-2-13B base.
---
base_model: TheBloke/Llama-2-13B-fp16
models:
  - model: TheBloke/Llama-2-13B-fp16
  - model: Undi95/Unholy-v2-13B
    parameters:
      weight: 1.0
  # NOTE: "tiefigther" is the actual upstream HF repo spelling — do not "correct" it.
  - model: Henk717/echidna-tiefigther-25
    parameters:
      weight: 0.45
  - model: KoboldAI/LLaMA2-13B-Erebus-v3
    parameters:
      weight: 0.33
dtype: bfloat16
merge_method: task_arithmetic
name: SnowyRP-13B-L2-P1

# --- Stage 2: SnowyRP-13B-L2-P2 ---
# Parallel task-arithmetic merge with a different model mix, same base/weights.
---
base_model: TheBloke/Llama-2-13B-fp16
models:
  - model: TheBloke/Llama-2-13B-fp16
  - model: KoboldAI/LLaMA2-13B-Psyfighter2
    parameters:
      weight: 1.0
  - model: Riiid/sheep-duck-llama-2-13b
    parameters:
      weight: 0.45
  - model: IkariDev/Athena-v4
    parameters:
      weight: 0.33
dtype: bfloat16
merge_method: task_arithmetic
name: SnowyRP-13B-L2-P2

# --- Stage 3: SnowyRP-13B-L2-Final-V1 ---
# TIES merge of the two intermediate merges plus EstopianOrcaMaid, with
# per-layer density/weight gradients and an MLP-only weight filter for P1.
---
base_model: TheBloke/Llama-2-13B-fp16
models:
  - model: ddh0/EstopianOrcaMaid-13b
    parameters:
      density: [1, 0.7, 0.1] # density gradient
      weight: 1.0
  - model: SnowyRP-13B-L2-P2
    parameters:
      density: 0.5
      weight: [0, 0.3, 0.7, 1] # weight gradient
  - model: SnowyRP-13B-L2-P1
    parameters:
      density: 0.33
      weight:
        # Apply 0.5 only to MLP tensors; everything else gets 0.
        - filter: mlp
          value: 0.5
        - value: 0
merge_method: ties
parameters:
  normalize: true
  int8_mask: true
dtype: bfloat16
name: SnowyRP-13B-L2-Final-V1