#!/usr/bin/env python3

# Copyright © 2025 Wenze Wei
#
# This file is part of Pisces L1.
#
# Licensed under the Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0).
# You may not use this file except in compliance with the License.
# Commercial use is strictly prohibited.
# You may obtain a copy of the License at
#
#     https://creativecommons.org/licenses/by-nc/4.0/
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
from typing import Optional, Dict, Any
from dataclasses import dataclass, field

@dataclass
class PiscesConfig:
    """Pisces L1 model configuration.

    Plain dataclass holding architecture hyperparameters (attention, MoE,
    multimodal token counts, RoPE settings). All fields have defaults, so
    `PiscesConfig()` yields the stock configuration; `from_json` overrides
    selected fields from a JSON file.
    """
    # The type of the model, set to "pisces_l1" by default.
    model_type: str = "pisces_l1"
    # The size of the vocabulary.
    vocab_size: int = 100_352
    # The hidden layer size of the model.
    hidden_size: int = 2048
    # The number of layers in the model.
    n_layer: int = 24
    # The number of attention heads.
    n_head: int = 16
    # The number of key-value attention heads (grouped-query attention).
    n_kv_head: int = 4
    # The number of experts in the Mixture of Experts (MoE) architecture.
    moe_num_experts: int = 64
    # The top-k experts to use in the Mixture of Experts (MoE) architecture.
    moe_top_k: int = 2
    # The size of the intermediate (feed-forward) layer.
    intermediate_size: int = 5632
    # The maximum number of position embeddings.
    max_position_embeddings: int = 8192
    # The base value for RoPE (Rotary Position Embedding).
    rope_theta: float = 1e6
    # The dropout rate.
    dropout: float = 0.0
    # The resolution of the input image.
    image_res: int = 224
    # The size of the image patch.
    image_patch: int = 14
    # The number of multimodal tokens.
    mm_tokens: int = 256
    # The number of audio tokens.
    audio_tokens: int = 512
    # The number of task classes.
    task_classes: int = 256
    # The number of evaluation dimensions.
    eval_dims: int = 7
    # Configuration for RoPE scaling, using YARN type by default.
    # default_factory is required: a plain dict default would be shared
    # (and mutable) across all instances.
    rope_scaling: dict = field(default_factory=lambda: {"type": "yarn", "factor": 32, "original_max_position_embeddings": 32768})

    @classmethod
    def from_json(cls, path: str) -> "PiscesConfig":
        """Load a configuration from a JSON file.

        Keys in the file that do not correspond to a declared field are
        ignored, so config files carrying extra metadata still load.

        Args:
            path: Path to a JSON file of config key/value pairs.

        Returns:
            A PiscesConfig with values taken from the file and defaults
            for everything not present in it.

        Raises:
            OSError: If the file cannot be opened.
            json.JSONDecodeError: If the file is not valid JSON.
        """
        # Original used open() without close(), leaking the file handle;
        # a context manager guarantees it is released. UTF-8 is explicit
        # so parsing doesn't depend on the platform's default encoding.
        with open(path, encoding="utf-8") as f:
            data = json.load(f)
        # Filter to declared fields so unrecognized keys don't raise
        # TypeError in the dataclass constructor.
        known = cls.__dataclass_fields__
        return cls(**{k: v for k, v in data.items() if k in known})