# coding=utf-8
"""Configuration classes for a LLaVA-style model: a Phi language backbone
paired with an open_clip vision tower."""

import math
from typing import Optional

from open_clip import get_model_config
from transformers.configuration_utils import PretrainedConfig


class PhiConfig(PretrainedConfig):
    """Phi configuration."""

    model_type = "phi-msft"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size: int = 51200,
        n_positions: int = 2048,
        n_embd: int = 1024,
        n_layer: int = 20,
        n_inner: Optional[int] = None,
        n_head: int = 16,
        n_head_kv: Optional[int] = None,
        rotary_dim: Optional[int] = 32,
        activation_function: Optional[str] = "gelu_new",
        flash_attn: bool = False,
        flash_rotary: bool = False,
        fused_dense: bool = False,
        attn_pdrop: float = 0.0,
        embd_pdrop: float = 0.0,
        resid_pdrop: float = 0.0,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        tie_word_embeddings: bool = False,
        pad_vocab_size_multiple: int = 64,
        **kwargs,
    ) -> None:
        # Pad the vocabulary size up to a multiple of `pad_vocab_size_multiple`
        # so the embedding matrix has well-aligned dimensions.
        self.vocab_size = int(
            math.ceil(vocab_size / pad_vocab_size_multiple) * pad_vocab_size_multiple
        )
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_inner = n_inner
        self.n_head = n_head
        self.n_head_kv = n_head_kv
        # Rotary embeddings never span more than a single attention head.
        self.rotary_dim = min(rotary_dim, n_embd // n_head)
        self.activation_function = activation_function
        self.flash_attn = flash_attn
        self.flash_rotary = flash_rotary
        self.fused_dense = fused_dense
        self.attn_pdrop = attn_pdrop
        self.embd_pdrop = embd_pdrop
        self.resid_pdrop = resid_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class LlavaConfig(PretrainedConfig):
    """Composite configuration: Phi text backbone plus an open_clip vision tower."""

    model_type = "llava"
    is_composition = False

    def __init__(
        self,
        text_config=None,
        vision_tower_name="ViT-SO400M-14-SigLIP-384",
        ignore_index=-100,
        image_token_index=50297,
        projector_hidden_act="gelu",
        projector_tokens_num=1,
        vocab_size=51200,
        **kwargs,
    ):
        self.ignore_index = ignore_index
        self.image_token_index = image_token_index
        self.projector_hidden_act = projector_hidden_act
        self.projector_tokens_num = projector_tokens_num
        self.vocab_size = vocab_size

        # Resolve the vision tower through open_clip and keep its embedding width,
        # which the multimodal projector uses as its input dimension.
        self.vision_tower_name = vision_tower_name
        vision_config = get_model_config(vision_tower_name)
        self.vision_embed_dim = vision_config["embed_dim"]

        # Promote a plain dict to a PhiConfig and adopt its (padded) vocab size.
        self.text_config = text_config
        if isinstance(self.text_config, dict):
            text_config["model_type"] = text_config.get("model_type", "llama")
            self.text_config = PhiConfig(**text_config)
            self.vocab_size = self.text_config.vocab_size

        super().__init__(**kwargs)
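

# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): shows how a text_config dict
# is promoted to a PhiConfig and how the padded vocab size propagates to the
# composite config. The hyper-parameter values below are illustrative
# assumptions, not values mandated by this repository.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    text_cfg = {
        "model_type": "phi-msft",
        "vocab_size": 51200,
        "n_embd": 2560,
        "n_layer": 32,
        "n_head": 32,
    }
    config = LlavaConfig(text_config=text_cfg)

    # The dict was turned into a PhiConfig instance.
    print(type(config.text_config).__name__)  # PhiConfig
    # Vocab size is padded to a multiple of 64 inside PhiConfig (51200 already is),
    # and the composite config adopts it.
    print(config.vocab_size)  # 51200
    # Embedding width of the SigLIP vision tower, read via open_clip.
    print(config.vision_embed_dim)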