---
# mergekit MoE configuration: routes between three Japanese Llama-3 experts
# on top of the rinna/llama-3-youko-8b base model.
base_model: rinna/llama-3-youko-8b  # https://huggingface.co/rinna/llama-3-youko-8b
gate_mode: hidden  # one of "hidden", "cheap_embed", or "random"
dtype: bfloat16  # output dtype (float32, float16, or bfloat16)

## (optional)
# experts_per_token: 2

experts:
  # Single-turn Japanese Q&A expert
  - source_model: haqishen/Llama-3-8B-Japanese-Instruct  # https://huggingface.co/haqishen/Llama-3-8B-Japanese-Instruct
    positive_prompts:
      - "日本語での一問一答の専門家です。"

  # Multi-turn Japanese conversation expert
  - source_model: lightblue/suzume-llama-3-8B-japanese  # https://huggingface.co/lightblue/suzume-llama-3-8B-japanese
    positive_prompts:
      - "日本語でのマルチターン会話の専門家です。"

  # Coding / math expert
  - source_model: aixsatoshi/Llama-3-8b-Cosmopedia-japanese  # https://huggingface.co/aixsatoshi/Llama-3-8b-Cosmopedia-japanese
    positive_prompts:
      - "コーディングや数学の専門家です。"