---
# mergekit merge configuration (701 bytes, commit e75ded6)
# Layer-slice merge of a Korean-instruction Llama-3 8B with Dolphin 2.9
# (coding/domain tuning), combined with equal weights at half precision.
models:
  - model: beomi/Llama-3-Open-Ko-8B-Instruct-preview
    parameters:
      weight: 0.5  # equal weight to balance foundational language processing with technical tasks
      layer_range: [0, 20]  # foundational and intermediate Korean language-processing layers
  - model: cognitivecomputations/dolphin-2.9-llama3-8b
    parameters:
      weight: 0.5  # equal weight to complement the capabilities of the Korean base model
      layer_range: [15, 24]  # advanced coding and domain-specific layers
# NOTE(review): `linear` averages corresponding tensors, which normally needs
# both models to contribute the same number of layers; these ranges select
# 20 vs. 9 layers — confirm mergekit accepts this, or restructure as a
# `slices:`-based passthrough merge.
merge_method: linear  # weighted average of corresponding layer tensors
dtype: float16  # half precision for lower memory use during the merge