# newmoe-0 / mergekit_moe_config.yml
base_model: Orion-zhen/Meissa-Qwen2.5-7B-Instruct
architecture: qwen
gate_mode: hidden
dtype: bfloat16
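# Note: gate_mode "hidden" initializes each expert's router weights from
# hidden-state representations of that expert's positive_prompts below
# (mergekit-moe's other gate modes are "cheap_embed" and "random").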
experts:
  - source_model: Orion-zhen/Meissa-Qwen2.5-7B-Instruct
    positive_prompts: ["chat", "assistant", "chat history", "chat context", "writing", "text writing", "editing", "text editing", "multilingual"]
  - source_model: Qwen/Qwen2.5-Math-1.5B
    positive_prompts: ["bio", "science", "biology", "natural sciences", "scientist", "math", "mathematician", "problem solving", "calculating", "logic"]
  - source_model: Qwen/Qwen2.5-Coder-3B-Instruct
    positive_prompts: ["code", "coding", "coder", "programming", "programmer", "code analysis", "code review", "code fix", "code improvement"]
  - source_model: RefalMachine/ruadapt_qwen2.5_3B_ext_u48_instruct_v4
    positive_prompts: ["russian chat", "russian chatting", "russian", "russian language", "russian text writing/editing"]
  - source_model: Kukedlc/Qwen2.5-1.5B-Spanish-1.0-DPO
    positive_prompts: ["spanish chat", "spanish chatting", "spanish", "spanish language", "spanish text writing/editing"]
shared_experts:
  - source_model: Orion-zhen/Meissa-Qwen2.5-7B-Instruct
    positive_prompts: # required by Qwen MoE for "hidden" gate mode, otherwise not allowed
      - "chat assistant"
    # (optional, but recommended:)
    residual_scale: 0.1 # downweight output from shared expert to prevent overcooking the model
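# Usage sketch (an assumption, not part of this repo): with mergekit installed
# (e.g. via `pip install mergekit`), this config is consumed by the mergekit-moe
# CLI, which takes the config path and an output directory; the directory name
# here is illustrative only.
#
#   mergekit-moe mergekit_moe_config.yml ./newmoe-0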