# config/config.py
from dataclasses import dataclass

@dataclass
class ModelConfig:
    """Architecture hyperparameters for the MoE Transformer model."""
    vocab_size: int = 32000             # vocabulary size
    d_model: int = 768                  # Transformer hidden dimension
    n_layers: int = 8                   # number of Transformer layers
    n_heads: int = 12                   # number of attention heads (d_model / n_heads = 64 per head)
    block_size: int = 256               # maximum sequence length
    rope_theta: float = 10000.0         # base frequency for RoPE rotary position encoding
    num_experts: int = 5                # total number of MoE experts
    experts_per_tok: int = 2            # experts each token is routed to (top-k)
    expert_hidden_size: int = 3072      # hidden size of each routed expert's FFN
    shared_hidden_size: int = 2048      # hidden size of the shared expert's FFN

@dataclass
class TrainingConfig:
    """Hyperparameters controlling the training loop."""
    device: str = 'cuda'                # training device
    learning_rate: float = 5e-4         # optimizer learning rate
    # The annotation is required: without it, dataclasses would treat
    # chunk_size as a plain class attribute and exclude it from
    # __init__/fields()/asdict(), unlike every other field here.
    chunk_size: int = 1024 * 1024       # read 1 MB of the corpus at a time
    epochs: int = 4                     # total number of training epochs
    batch_size: int = 32                # samples per batch
    steps_per_epoch: int = 5000         # training steps per epoch


@dataclass
class PathsConfig:
    """Filesystem paths for corpus, tokenizer, checkpoints, and logs."""
    corpus_path: str = './data/pretrain_corpus.txt'                     # raw training corpus file
    tokenizer_model_path: str = 'tokenizer/pretrain_corpus_sp.model'    # SentencePiece tokenizer model file
    encoded_ids_path: str = './data/pretrain_corpus_tokens_IDs'         # output path for the encoded token IDs
    model_output_path: str = './checkpoints'                            # checkpoint directory
    figures_path: str = './figures'                                     # directory for saved figures
    log_dir: str = './logs'                                             # TensorBoard log directory
    checkpoint_file: str = 'latest_checkpoint.pth'                      # filename of the latest checkpoint

# Module-level singleton instances for other modules to import.
model_cfg, train_cfg, paths_cfg = ModelConfig(), TrainingConfig(), PathsConfig()