""" Bloom configuration""" |
|
from collections import OrderedDict |
|
from typing import TYPE_CHECKING, Any, List, Mapping, Optional |
|
|
|
from packaging import version |
|
|
|
|
|
if TYPE_CHECKING: |
|
from transformers import PreTrainedTokenizer, TensorType |
|
|
|
from transformers.configuration_utils import PretrainedConfig |
|
from transformers.utils import is_torch_available, logging |
|
|
|
|
|
logger = logging.get_logger(__name__) |
|
|
|
|
|
class CCBloomConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`BloomModel`]. It is used to instantiate a Bloom
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a configuration similar to that of the Bloom architecture
    [bigscience/bloom](https://huggingface.co/bigscience/bloom).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 250880):
            Vocabulary size of the Bloom model. Defines the maximum number of different tokens that can be
            represented by the `input_ids` passed when calling [`BloomModel`]. See [this
            discussion](https://huggingface.co/bigscience/bloom/discussions/120#633d28389addb8530b406c2a) on how the
            `vocab_size` was defined.
        hidden_size (`int`, *optional*, defaults to 64):
            Dimensionality of the embeddings and hidden states.
        n_layer (`int`, *optional*, defaults to 2):
            Number of hidden layers in the Transformer encoder.
        n_head (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
            The epsilon to use in the layer normalization layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        apply_residual_connection_post_layernorm (`bool`, *optional*, defaults to `False`):
            If enabled, use the layer norm of the hidden states as the residual in the transformer blocks.
        hidden_dropout (`float`, *optional*, defaults to 0.0):
            Dropout rate applied in the bias-dropout-add operations on the hidden states.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            Dropout rate applied to the attention probabilities.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        bos_token_id (`int`, *optional*, defaults to 1):
            Id of the *beginning-of-sequence* token.
        eos_token_id (`int`, *optional*, defaults to 2):
            Id of the *end-of-sequence* token.
        pretraining_tp (`int`, *optional*, defaults to `1`):
            Experimental feature. Tensor parallelism rank used during pretraining with Megatron. Please refer to
            [this document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This
            value is necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
            issue](https://github.com/pytorch/pytorch/issues/76232). Note also that this is enabled only when
            `slow_but_exact=True`.
        slow_but_exact (`bool`, *optional*, defaults to `False`):
            Experimental feature. Whether to use a slow but exact implementation of the attention mechanism. While
            merging the TP rank tensors, due to slicing operations the results may be slightly different between the
            model trained on Megatron and our model. Please refer to [this
            issue](https://github.com/pytorch/pytorch/issues/76232). A solution to obtain more accurate results is to
            enable this feature. Enabling it increases inference time and will likely be resolved in the future once
            the main model has been fine-tuned with TP_rank=1.
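
    Example (a minimal usage sketch; the companion model class is not defined in this file, so only the
    configuration object itself is exercised here):

    ```python
    >>> # Initializing a CCBloom configuration with the defaults documented above
    >>> configuration = CCBloomConfig()
    >>> configuration.n_layer
    2

    >>> # `n_embed` is accepted as a legacy alias for `hidden_size`
    >>> CCBloomConfig(n_embed=128).hidden_size
    128

    >>> # `attribute_map` exposes the standard attribute names as aliases
    >>> configuration.num_attention_heads
    8
    ```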
""" |
|
|
|
    model_type = "ccbloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Expose the standard `num_hidden_layers` / `num_attention_heads` names as aliases
    # for the Bloom-style `n_layer` / `n_head` attributes.
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size

        # Backward compatibility: accept `n_embed` as a legacy alias for `hidden_size`.
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)