Source code for transformers.modeling_auto

# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Model class. """


import logging
import warnings
from collections import OrderedDict

from .configuration_auto import (
    AlbertConfig,
    AutoConfig,
    BartConfig,
    BertConfig,
    CamembertConfig,
    CTRLConfig,
    DistilBertConfig,
    ElectraConfig,
    EncoderDecoderConfig,
    FlaubertConfig,
    GPT2Config,
    LongformerConfig,
    MobileBertConfig,
    OpenAIGPTConfig,
    ReformerConfig,
    RetriBertConfig,
    RobertaConfig,
    T5Config,
    TransfoXLConfig,
    XLMConfig,
    XLMRobertaConfig,
    XLNetConfig,
)
from .configuration_marian import MarianConfig
from .configuration_utils import PretrainedConfig
from .modeling_albert import (
    AlbertForMaskedLM,
    AlbertForMultipleChoice,
    AlbertForPreTraining,
    AlbertForQuestionAnswering,
    AlbertForSequenceClassification,
    AlbertForTokenClassification,
    AlbertModel,
)
from .modeling_bart import (
    BartForConditionalGeneration,
    BartForQuestionAnswering,
    BartForSequenceClassification,
    BartModel,
)
from .modeling_bert import (
    BertForMaskedLM,
    BertForMultipleChoice,
    BertForPreTraining,
    BertForQuestionAnswering,
    BertForSequenceClassification,
    BertForTokenClassification,
    BertLMHeadModel,
    BertModel,
)
from .modeling_camembert import (
    CamembertForMaskedLM,
    CamembertForMultipleChoice,
    CamembertForSequenceClassification,
    CamembertForTokenClassification,
    CamembertModel,
)
from .modeling_ctrl import CTRLLMHeadModel, CTRLModel
from .modeling_distilbert import (
    DistilBertForMaskedLM,
    DistilBertForMultipleChoice,
    DistilBertForQuestionAnswering,
    DistilBertForSequenceClassification,
    DistilBertForTokenClassification,
    DistilBertModel,
)
from .modeling_electra import (
    ElectraForMaskedLM,
    ElectraForMultipleChoice,
    ElectraForPreTraining,
    ElectraForQuestionAnswering,
    ElectraForSequenceClassification,
    ElectraForTokenClassification,
    ElectraModel,
)
from .modeling_encoder_decoder import EncoderDecoderModel
from .modeling_flaubert import (
    FlaubertForQuestionAnsweringSimple,
    FlaubertForSequenceClassification,
    FlaubertModel,
    FlaubertWithLMHeadModel,
)
from .modeling_gpt2 import GPT2LMHeadModel, GPT2Model
from .modeling_longformer import (
    LongformerForMaskedLM,
    LongformerForMultipleChoice,
    LongformerForQuestionAnswering,
    LongformerForSequenceClassification,
    LongformerForTokenClassification,
    LongformerModel,
)
from .modeling_marian import MarianMTModel
from .modeling_mobilebert import (
    MobileBertForMaskedLM,
    MobileBertForMultipleChoice,
    MobileBertForPreTraining,
    MobileBertForQuestionAnswering,
    MobileBertForSequenceClassification,
    MobileBertForTokenClassification,
    MobileBertModel,
)
from .modeling_openai import OpenAIGPTLMHeadModel, OpenAIGPTModel
from .modeling_reformer import (
    ReformerForMaskedLM,
    ReformerForQuestionAnswering,
    ReformerModel,
    ReformerModelWithLMHead,
)
from .modeling_retribert import RetriBertModel
from .modeling_roberta import (
    RobertaForMaskedLM,
    RobertaForMultipleChoice,
    RobertaForQuestionAnswering,
    RobertaForSequenceClassification,
    RobertaForTokenClassification,
    RobertaModel,
)
from .modeling_t5 import T5ForConditionalGeneration, T5Model
from .modeling_transfo_xl import TransfoXLLMHeadModel, TransfoXLModel
from .modeling_xlm import (
    XLMForQuestionAnsweringSimple,
    XLMForSequenceClassification,
    XLMForTokenClassification,
    XLMModel,
    XLMWithLMHeadModel,
)
from .modeling_xlm_roberta import (
    XLMRobertaForMaskedLM,
    XLMRobertaForMultipleChoice,
    XLMRobertaForQuestionAnswering,
    XLMRobertaForSequenceClassification,
    XLMRobertaForTokenClassification,
    XLMRobertaModel,
)
from .modeling_xlnet import (
    XLNetForMultipleChoice,
    XLNetForQuestionAnsweringSimple,
    XLNetForSequenceClassification,
    XLNetForTokenClassification,
    XLNetLMHeadModel,
    XLNetModel,
)


logger = logging.getLogger(__name__)


MODEL_MAPPING = OrderedDict(
    [
        (RetriBertConfig, RetriBertModel),
        (T5Config, T5Model),
        (DistilBertConfig, DistilBertModel),
        (AlbertConfig, AlbertModel),
        (CamembertConfig, CamembertModel),
        (XLMRobertaConfig, XLMRobertaModel),
        (BartConfig, BartModel),
        (LongformerConfig, LongformerModel),
        (RobertaConfig, RobertaModel),
        (BertConfig, BertModel),
        (OpenAIGPTConfig, OpenAIGPTModel),
        (GPT2Config, GPT2Model),
        (MobileBertConfig, MobileBertModel),
        (TransfoXLConfig, TransfoXLModel),
        (XLNetConfig, XLNetModel),
        (FlaubertConfig, FlaubertModel),
        (XLMConfig, XLMModel),
        (CTRLConfig, CTRLModel),
        (ElectraConfig, ElectraModel),
        (ReformerConfig, ReformerModel),
    ]
)
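
# Note on ordering: the Auto* classes below resolve a configuration to a model with
# ``isinstance`` checks over these OrderedDicts, and several configuration classes
# subclass others (e.g. ``CamembertConfig``, ``LongformerConfig`` and
# ``XLMRobertaConfig`` derive from ``RobertaConfig``, which derives from
# ``BertConfig``; ``FlaubertConfig`` derives from ``XLMConfig``). A derived config
# must therefore be listed before its base class, otherwise the base mapping would
# shadow it. A minimal sketch of the resolution loop used throughout this module:
#
#     for config_class, model_class in MODEL_MAPPING.items():
#         if isinstance(config, config_class):
#             return model_class(config)  # first (most specific) match wins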

MODEL_FOR_PRETRAINING_MAPPING = OrderedDict(
    [
        (RetriBertConfig, RetriBertModel),
        (T5Config, T5ForConditionalGeneration),
        (DistilBertConfig, DistilBertForMaskedLM),
        (AlbertConfig, AlbertForPreTraining),
        (CamembertConfig, CamembertForMaskedLM),
        (XLMRobertaConfig, XLMRobertaForMaskedLM),
        (BartConfig, BartForConditionalGeneration),
        (LongformerConfig, LongformerForMaskedLM),
        (RobertaConfig, RobertaForMaskedLM),
        (BertConfig, BertForPreTraining),
        (OpenAIGPTConfig, OpenAIGPTLMHeadModel),
        (GPT2Config, GPT2LMHeadModel),
        (MobileBertConfig, MobileBertForPreTraining),
        (TransfoXLConfig, TransfoXLLMHeadModel),
        (XLNetConfig, XLNetLMHeadModel),
        (FlaubertConfig, FlaubertWithLMHeadModel),
        (XLMConfig, XLMWithLMHeadModel),
        (CTRLConfig, CTRLLMHeadModel),
        (ElectraConfig, ElectraForPreTraining),
    ]
)

MODEL_WITH_LM_HEAD_MAPPING = OrderedDict(
    [
        (T5Config, T5ForConditionalGeneration),
        (DistilBertConfig, DistilBertForMaskedLM),
        (AlbertConfig, AlbertForMaskedLM),
        (CamembertConfig, CamembertForMaskedLM),
        (XLMRobertaConfig, XLMRobertaForMaskedLM),
        (MarianConfig, MarianMTModel),
        (BartConfig, BartForConditionalGeneration),
        (LongformerConfig, LongformerForMaskedLM),
        (RobertaConfig, RobertaForMaskedLM),
        (BertConfig, BertForMaskedLM),
        (OpenAIGPTConfig, OpenAIGPTLMHeadModel),
        (GPT2Config, GPT2LMHeadModel),
        (MobileBertConfig, MobileBertForMaskedLM),
        (TransfoXLConfig, TransfoXLLMHeadModel),
        (XLNetConfig, XLNetLMHeadModel),
        (FlaubertConfig, FlaubertWithLMHeadModel),
        (XLMConfig, XLMWithLMHeadModel),
        (CTRLConfig, CTRLLMHeadModel),
        (ElectraConfig, ElectraForMaskedLM),
        (EncoderDecoderConfig, EncoderDecoderModel),
        (ReformerConfig, ReformerModelWithLMHead),
    ]
)
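
# ``MODEL_WITH_LM_HEAD_MAPPING`` mixes causal, masked and sequence-to-sequence LMs;
# it backs the deprecated ``AutoModelWithLMHead`` class below. The three mappings
# that follow split it by objective for the replacement classes
# ``AutoModelForCausalLM``, ``AutoModelForMaskedLM`` and ``AutoModelForSeq2SeqLM``.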

MODEL_FOR_CAUSAL_LM_MAPPING = OrderedDict(
    [
        (BertConfig, BertLMHeadModel),
        (OpenAIGPTConfig, OpenAIGPTLMHeadModel),
        (GPT2Config, GPT2LMHeadModel),
        (TransfoXLConfig, TransfoXLLMHeadModel),
        (XLNetConfig, XLNetLMHeadModel),
        (
            XLMConfig,
            XLMWithLMHeadModel,
        ),  # XLM can be MLM and CLM => model should be split similar to BERT; leave here for now
        (CTRLConfig, CTRLLMHeadModel),
        (ReformerConfig, ReformerModelWithLMHead),
    ]
)

MODEL_FOR_MASKED_LM_MAPPING = OrderedDict(
    [
        (DistilBertConfig, DistilBertForMaskedLM),
        (AlbertConfig, AlbertForMaskedLM),
        (CamembertConfig, CamembertForMaskedLM),
        (XLMRobertaConfig, XLMRobertaForMaskedLM),
        (LongformerConfig, LongformerForMaskedLM),
        (RobertaConfig, RobertaForMaskedLM),
        (BertConfig, BertForMaskedLM),
        (MobileBertConfig, MobileBertForMaskedLM),
        (FlaubertConfig, FlaubertWithLMHeadModel),
        (XLMConfig, XLMWithLMHeadModel),
        (ElectraConfig, ElectraForMaskedLM),
        (ReformerConfig, ReformerForMaskedLM),
    ]
)

MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = OrderedDict(
    [
        (T5Config, T5ForConditionalGeneration),
        (MarianConfig, MarianMTModel),
        (BartConfig, BartForConditionalGeneration),
        (EncoderDecoderConfig, EncoderDecoderModel),
    ]
)

MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = OrderedDict(
    [
        (DistilBertConfig, DistilBertForSequenceClassification),
        (AlbertConfig, AlbertForSequenceClassification),
        (CamembertConfig, CamembertForSequenceClassification),
        (XLMRobertaConfig, XLMRobertaForSequenceClassification),
        (BartConfig, BartForSequenceClassification),
        (LongformerConfig, LongformerForSequenceClassification),
        (RobertaConfig, RobertaForSequenceClassification),
        (BertConfig, BertForSequenceClassification),
        (XLNetConfig, XLNetForSequenceClassification),
        (MobileBertConfig, MobileBertForSequenceClassification),
        (FlaubertConfig, FlaubertForSequenceClassification),
        (XLMConfig, XLMForSequenceClassification),
        (ElectraConfig, ElectraForSequenceClassification),
    ]
)

MODEL_FOR_QUESTION_ANSWERING_MAPPING = OrderedDict(
    [
        (DistilBertConfig, DistilBertForQuestionAnswering),
        (AlbertConfig, AlbertForQuestionAnswering),
        (BartConfig, BartForQuestionAnswering),
        (LongformerConfig, LongformerForQuestionAnswering),
        (XLMRobertaConfig, XLMRobertaForQuestionAnswering),
        (RobertaConfig, RobertaForQuestionAnswering),
        (BertConfig, BertForQuestionAnswering),
        (XLNetConfig, XLNetForQuestionAnsweringSimple),
        (FlaubertConfig, FlaubertForQuestionAnsweringSimple),
        (MobileBertConfig, MobileBertForQuestionAnswering),
        (XLMConfig, XLMForQuestionAnsweringSimple),
        (ElectraConfig, ElectraForQuestionAnswering),
        (ReformerConfig, ReformerForQuestionAnswering),
    ]
)

MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = OrderedDict(
    [
        (DistilBertConfig, DistilBertForTokenClassification),
        (CamembertConfig, CamembertForTokenClassification),
        (XLMConfig, XLMForTokenClassification),
        (XLMRobertaConfig, XLMRobertaForTokenClassification),
        (LongformerConfig, LongformerForTokenClassification),
        (RobertaConfig, RobertaForTokenClassification),
        (BertConfig, BertForTokenClassification),
        (MobileBertConfig, MobileBertForTokenClassification),
        (XLNetConfig, XLNetForTokenClassification),
        (AlbertConfig, AlbertForTokenClassification),
        (ElectraConfig, ElectraForTokenClassification),
    ]
)


MODEL_FOR_MULTIPLE_CHOICE_MAPPING = OrderedDict(
    [
        (CamembertConfig, CamembertForMultipleChoice),
        (ElectraConfig, ElectraForMultipleChoice),
        (XLMRobertaConfig, XLMRobertaForMultipleChoice),
        (LongformerConfig, LongformerForMultipleChoice),
        (RobertaConfig, RobertaForMultipleChoice),
        (BertConfig, BertForMultipleChoice),
        (DistilBertConfig, DistilBertForMultipleChoice),
        (MobileBertConfig, MobileBertForMultipleChoice),
        (XLNetConfig, XLNetForMultipleChoice),
        (AlbertConfig, AlbertForMultipleChoice),
    ]
)


class AutoModel:
    r"""
    :class:`~transformers.AutoModel` is a generic model class that will be instantiated as one of the
    base model classes of the library when created with the
    `AutoModel.from_pretrained(pretrained_model_name_or_path)` or the
    `AutoModel.from_config(config)` class methods.

    This class cannot be instantiated using `__init__()` (throws an error).
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoModel is designed to be instantiated "
            "using the `AutoModel.from_pretrained(pretrained_model_name_or_path)` or "
            "`AutoModel.from_config(config)` methods."
        )

    @classmethod
    def from_config(cls, config):
        r"""Instantiates one of the base model classes of the library from a configuration.

        Note:
            Loading a model from its configuration file does **not** load the model weights.
            It only affects the model's configuration. Use
            :func:`~transformers.AutoModel.from_pretrained` to load the model weights.

        Args:
            config (:class:`~transformers.PretrainedConfig`):
                The model class to instantiate is selected based on the configuration class:

                - instance of `distilbert` configuration class: :class:`~transformers.DistilBertModel` (DistilBERT model)
                - instance of `longformer` configuration class: :class:`~transformers.LongformerModel` (Longformer model)
                - instance of `roberta` configuration class: :class:`~transformers.RobertaModel` (RoBERTa model)
                - instance of `bert` configuration class: :class:`~transformers.BertModel` (Bert model)
                - instance of `openai-gpt` configuration class: :class:`~transformers.OpenAIGPTModel` (OpenAI GPT model)
                - instance of `gpt2` configuration class: :class:`~transformers.GPT2Model` (OpenAI GPT-2 model)
                - instance of `ctrl` configuration class: :class:`~transformers.CTRLModel` (Salesforce CTRL model)
                - instance of `transfo-xl` configuration class: :class:`~transformers.TransfoXLModel` (Transformer-XL model)
                - instance of `xlnet` configuration class: :class:`~transformers.XLNetModel` (XLNet model)
                - instance of `xlm` configuration class: :class:`~transformers.XLMModel` (XLM model)
                - instance of `flaubert` configuration class: :class:`~transformers.FlaubertModel` (Flaubert model)
                - instance of `electra` configuration class: :class:`~transformers.ElectraModel` (Electra model)

        Examples::

            >>> config = BertConfig.from_pretrained('bert-base-uncased')  # Download configuration from S3 and cache.
            >>> model = AutoModel.from_config(config)  # Builds the model from the config alone; no weights are loaded.

        """
        for config_class, model_class in MODEL_MAPPING.items():
            if isinstance(config, config_class):
                return model_class(config)
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_MAPPING.keys())
            )
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r"""Instantiates one of the base model classes of the library from a pre-trained model configuration.

        The `from_pretrained()` method takes care of returning the correct model class instance
        based on the `model_type` property of the config object, or when it's missing,
        falling back to using pattern matching on the `pretrained_model_name_or_path` string:

            - `t5`: :class:`~transformers.T5Model` (T5 model)
            - `distilbert`: :class:`~transformers.DistilBertModel` (DistilBERT model)
            - `albert`: :class:`~transformers.AlbertModel` (ALBERT model)
            - `camembert`: :class:`~transformers.CamembertModel` (CamemBERT model)
            - `xlm-roberta`: :class:`~transformers.XLMRobertaModel` (XLM-RoBERTa model)
            - `longformer`: :class:`~transformers.LongformerModel` (Longformer model)
            - `roberta`: :class:`~transformers.RobertaModel` (RoBERTa model)
            - `bert`: :class:`~transformers.BertModel` (Bert model)
            - `openai-gpt`: :class:`~transformers.OpenAIGPTModel` (OpenAI GPT model)
            - `gpt2`: :class:`~transformers.GPT2Model` (OpenAI GPT-2 model)
            - `transfo-xl`: :class:`~transformers.TransfoXLModel` (Transformer-XL model)
            - `xlnet`: :class:`~transformers.XLNetModel` (XLNet model)
            - `xlm`: :class:`~transformers.XLMModel` (XLM model)
            - `ctrl`: :class:`~transformers.CTRLModel` (Salesforce CTRL model)
            - `flaubert`: :class:`~transformers.FlaubertModel` (Flaubert model)
            - `electra`: :class:`~transformers.ElectraModel` (Electra model)

        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated).
        To train the model, you should first set it back in training mode with `model.train()`.

        Args:
            pretrained_model_name_or_path: Either:

                - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
                - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
                - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
                - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as the ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint to a PyTorch model with the provided conversion scripts and loading the PyTorch model afterwards.
            model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method.
            config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:

                - the model is a model provided by the library (loaded with the `shortcut name` string of a pretrained model), or
                - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory, or
                - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
            state_dict: (`optional`) dict:
                An optional state dictionary for the model to use instead of a state dictionary loaded from the saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
            cache_dir: (`optional`) string:
                Path to a directory in which a downloaded pre-trained model configuration should be cached if the standard cache should not be used.
            force_download: (`optional`) boolean, default False:
                Force a (re-)download of the model weights and configuration files, overriding any cached versions.
            resume_download: (`optional`) boolean, default False:
                Do not delete an incompletely received file. Attempt to resume the download if such a file exists.
            proxies: (`optional`) dict, default None:
                A dictionary of proxy servers to use by protocol or endpoint, e.g.: ``{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}``. The proxies are used on each request.
            output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
            kwargs: (`optional`) Remaining dictionary of keyword arguments:
                These arguments will be passed to the configuration and the model.

        Examples::

            model = AutoModel.from_pretrained('bert-base-uncased')  # Download model and configuration from S3 and cache.
            model = AutoModel.from_pretrained('bert-base-uncased', output_attentions=True)  # Update configuration during loading.
            assert model.config.output_attentions == True
            # Loading from a TF checkpoint file instead of a PyTorch model (slower)
            config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
            model = AutoModel.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)

        """
        config = kwargs.pop("config", None)
        if not isinstance(config, PretrainedConfig):
            config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)

        for config_class, model_class in MODEL_MAPPING.items():
            if isinstance(config, config_class):
                return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_MAPPING.keys())
            )
        )


class AutoModelForPreTraining:
    r"""
    :class:`~transformers.AutoModelForPreTraining` is a generic model class that will be instantiated
    as one of the model classes of the library (with the architecture used for pretraining this model)
    when created with the `AutoModelForPreTraining.from_pretrained(pretrained_model_name_or_path)`
    class method.

    This class cannot be instantiated using `__init__()` (throws an error).
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoModelForPreTraining is designed to be instantiated "
            "using the `AutoModelForPreTraining.from_pretrained(pretrained_model_name_or_path)` or "
            "`AutoModelForPreTraining.from_config(config)` methods."
        )

    @classmethod
    def from_config(cls, config):
        r"""Instantiates one of the base model classes of the library from a configuration.

        Note:
            Loading a model from its configuration file does **not** load the model weights.
            It only affects the model's configuration. Use
            :func:`~transformers.AutoModel.from_pretrained` to load the model weights.

        Args:
            config (:class:`~transformers.PretrainedConfig`):
                The model class to instantiate is selected based on the configuration class:

                - instance of `distilbert` configuration class: :class:`~transformers.DistilBertForMaskedLM` (DistilBERT model)
                - instance of `longformer` configuration class: :class:`~transformers.LongformerForMaskedLM` (Longformer model)
                - instance of `roberta` configuration class: :class:`~transformers.RobertaForMaskedLM` (RoBERTa model)
                - instance of `bert` configuration class: :class:`~transformers.BertForPreTraining` (Bert model)
                - instance of `openai-gpt` configuration class: :class:`~transformers.OpenAIGPTLMHeadModel` (OpenAI GPT model)
                - instance of `gpt2` configuration class: :class:`~transformers.GPT2LMHeadModel` (OpenAI GPT-2 model)
                - instance of `ctrl` configuration class: :class:`~transformers.CTRLLMHeadModel` (Salesforce CTRL model)
                - instance of `transfo-xl` configuration class: :class:`~transformers.TransfoXLLMHeadModel` (Transformer-XL model)
                - instance of `xlnet` configuration class: :class:`~transformers.XLNetLMHeadModel` (XLNet model)
                - instance of `xlm` configuration class: :class:`~transformers.XLMWithLMHeadModel` (XLM model)
                - instance of `flaubert` configuration class: :class:`~transformers.FlaubertWithLMHeadModel` (Flaubert model)
                - instance of `electra` configuration class: :class:`~transformers.ElectraForPreTraining` (Electra model)

        Examples::

            >>> config = BertConfig.from_pretrained('bert-base-uncased')  # Download configuration from S3 and cache.
            >>> model = AutoModelForPreTraining.from_config(config)  # Builds the model from the config alone; no weights are loaded.

        """
        for config_class, model_class in MODEL_FOR_PRETRAINING_MAPPING.items():
            if isinstance(config, config_class):
                return model_class(config)
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_PRETRAINING_MAPPING.keys())
            )
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r"""Instantiates one of the model classes of the library (with the architecture used for
        pretraining this model) from a pre-trained model configuration.

        The `from_pretrained()` method takes care of returning the correct model class instance
        based on the `model_type` property of the config object, or when it's missing,
        falling back to using pattern matching on the `pretrained_model_name_or_path` string:

            - `t5`: :class:`~transformers.T5ForConditionalGeneration` (T5 model)
            - `distilbert`: :class:`~transformers.DistilBertForMaskedLM` (DistilBERT model)
            - `albert`: :class:`~transformers.AlbertForPreTraining` (ALBERT model)
            - `camembert`: :class:`~transformers.CamembertForMaskedLM` (CamemBERT model)
            - `xlm-roberta`: :class:`~transformers.XLMRobertaForMaskedLM` (XLM-RoBERTa model)
            - `longformer`: :class:`~transformers.LongformerForMaskedLM` (Longformer model)
            - `roberta`: :class:`~transformers.RobertaForMaskedLM` (RoBERTa model)
            - `bert`: :class:`~transformers.BertForPreTraining` (Bert model)
            - `openai-gpt`: :class:`~transformers.OpenAIGPTLMHeadModel` (OpenAI GPT model)
            - `gpt2`: :class:`~transformers.GPT2LMHeadModel` (OpenAI GPT-2 model)
            - `transfo-xl`: :class:`~transformers.TransfoXLLMHeadModel` (Transformer-XL model)
            - `xlnet`: :class:`~transformers.XLNetLMHeadModel` (XLNet model)
            - `xlm`: :class:`~transformers.XLMWithLMHeadModel` (XLM model)
            - `ctrl`: :class:`~transformers.CTRLLMHeadModel` (Salesforce CTRL model)
            - `flaubert`: :class:`~transformers.FlaubertWithLMHeadModel` (Flaubert model)
            - `electra`: :class:`~transformers.ElectraForPreTraining` (Electra model)

        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated).
        To train the model, you should first set it back in training mode with `model.train()`.

        Args:
            pretrained_model_name_or_path: Either:

                - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
                - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
                - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
                - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as the ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint to a PyTorch model with the provided conversion scripts and loading the PyTorch model afterwards.
            model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method.
            config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:

                - the model is a model provided by the library (loaded with the `shortcut name` string of a pretrained model), or
                - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory, or
                - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
            state_dict: (`optional`) dict:
                An optional state dictionary for the model to use instead of a state dictionary loaded from the saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
            cache_dir: (`optional`) string:
                Path to a directory in which a downloaded pre-trained model configuration should be cached if the standard cache should not be used.
            force_download: (`optional`) boolean, default False:
                Force a (re-)download of the model weights and configuration files, overriding any cached versions.
            resume_download: (`optional`) boolean, default False:
                Do not delete an incompletely received file. Attempt to resume the download if such a file exists.
            proxies: (`optional`) dict, default None:
                A dictionary of proxy servers to use by protocol or endpoint, e.g.: ``{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}``. The proxies are used on each request.
            output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
            kwargs: (`optional`) Remaining dictionary of keyword arguments:
                These arguments will be passed to the configuration and the model.

        Examples::

            model = AutoModelForPreTraining.from_pretrained('bert-base-uncased')  # Download model and configuration from S3 and cache.
            model = AutoModelForPreTraining.from_pretrained('./test/bert_model/')  # E.g. model was saved using `save_pretrained('./test/bert_model/')`
            model = AutoModelForPreTraining.from_pretrained('bert-base-uncased', output_attentions=True)  # Update configuration during loading.
            assert model.config.output_attentions == True
            # Loading from a TF checkpoint file instead of a PyTorch model (slower)
            config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
            model = AutoModelForPreTraining.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)

        """
        config = kwargs.pop("config", None)
        if not isinstance(config, PretrainedConfig):
            config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)

        for config_class, model_class in MODEL_FOR_PRETRAINING_MAPPING.items():
            if isinstance(config, config_class):
                return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_PRETRAINING_MAPPING.keys())
            )
        )


class AutoModelWithLMHead:
    r"""
    :class:`~transformers.AutoModelWithLMHead` is a generic model class that will be instantiated as
    one of the language modeling model classes of the library when created with the
    `AutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)` class method.

    This class cannot be instantiated using `__init__()` (throws an error).
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoModelWithLMHead is designed to be instantiated "
            "using the `AutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)` or "
            "`AutoModelWithLMHead.from_config(config)` methods."
        )

    @classmethod
    def from_config(cls, config):
        r"""Instantiates one of the base model classes of the library from a configuration.

        Note:
            Loading a model from its configuration file does **not** load the model weights.
            It only affects the model's configuration. Use
            :func:`~transformers.AutoModel.from_pretrained` to load the model weights.

        Args:
            config (:class:`~transformers.PretrainedConfig`):
                The model class to instantiate is selected based on the configuration class:

                - instance of `distilbert` configuration class: :class:`~transformers.DistilBertForMaskedLM` (DistilBERT model)
                - instance of `longformer` configuration class: :class:`~transformers.LongformerForMaskedLM` (Longformer model)
                - instance of `roberta` configuration class: :class:`~transformers.RobertaForMaskedLM` (RoBERTa model)
                - instance of `bert` configuration class: :class:`~transformers.BertForMaskedLM` (Bert model)
                - instance of `openai-gpt` configuration class: :class:`~transformers.OpenAIGPTLMHeadModel` (OpenAI GPT model)
                - instance of `gpt2` configuration class: :class:`~transformers.GPT2LMHeadModel` (OpenAI GPT-2 model)
                - instance of `ctrl` configuration class: :class:`~transformers.CTRLLMHeadModel` (Salesforce CTRL model)
                - instance of `transfo-xl` configuration class: :class:`~transformers.TransfoXLLMHeadModel` (Transformer-XL model)
                - instance of `xlnet` configuration class: :class:`~transformers.XLNetLMHeadModel` (XLNet model)
                - instance of `xlm` configuration class: :class:`~transformers.XLMWithLMHeadModel` (XLM model)
                - instance of `flaubert` configuration class: :class:`~transformers.FlaubertWithLMHeadModel` (Flaubert model)
                - instance of `electra` configuration class: :class:`~transformers.ElectraForMaskedLM` (Electra model)

        Examples::

            config = BertConfig.from_pretrained('bert-base-uncased')  # Download configuration from S3 and cache.
            model = AutoModelWithLMHead.from_config(config)  # Builds the model from the config alone; no weights are loaded.

        """
        warnings.warn(
            "The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use "
            "`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models "
            "and `AutoModelForSeq2SeqLM` for encoder-decoder models.",
            FutureWarning,
        )
        for config_class, model_class in MODEL_WITH_LM_HEAD_MAPPING.items():
            if isinstance(config, config_class):
                return model_class(config)
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_WITH_LM_HEAD_MAPPING.keys())
            )
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r"""Instantiates one of the language modeling model classes of the library from a pre-trained
        model configuration.

        The `from_pretrained()` method takes care of returning the correct model class instance
        based on the `model_type` property of the config object, or when it's missing,
        falling back to using pattern matching on the `pretrained_model_name_or_path` string:

            - `t5`: :class:`~transformers.T5ForConditionalGeneration` (T5 model)
            - `distilbert`: :class:`~transformers.DistilBertForMaskedLM` (DistilBERT model)
            - `albert`: :class:`~transformers.AlbertForMaskedLM` (ALBERT model)
            - `camembert`: :class:`~transformers.CamembertForMaskedLM` (CamemBERT model)
            - `xlm-roberta`: :class:`~transformers.XLMRobertaForMaskedLM` (XLM-RoBERTa model)
            - `longformer`: :class:`~transformers.LongformerForMaskedLM` (Longformer model)
            - `roberta`: :class:`~transformers.RobertaForMaskedLM` (RoBERTa model)
            - `bert`: :class:`~transformers.BertForMaskedLM` (Bert model)
            - `openai-gpt`: :class:`~transformers.OpenAIGPTLMHeadModel` (OpenAI GPT model)
            - `gpt2`: :class:`~transformers.GPT2LMHeadModel` (OpenAI GPT-2 model)
            - `transfo-xl`: :class:`~transformers.TransfoXLLMHeadModel` (Transformer-XL model)
            - `xlnet`: :class:`~transformers.XLNetLMHeadModel` (XLNet model)
            - `xlm`: :class:`~transformers.XLMWithLMHeadModel` (XLM model)
            - `ctrl`: :class:`~transformers.CTRLLMHeadModel` (Salesforce CTRL model)
            - `flaubert`: :class:`~transformers.FlaubertWithLMHeadModel` (Flaubert model)
            - `electra`: :class:`~transformers.ElectraForMaskedLM` (Electra model)

        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated).
        To train the model, you should first set it back in training mode with `model.train()`.

        Args:
            pretrained_model_name_or_path: Either:

                - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
                - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
                - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
                - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as the ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint to a PyTorch model with the provided conversion scripts and loading the PyTorch model afterwards.
            model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method.
            config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:

                - the model is a model provided by the library (loaded with the `shortcut name` string of a pretrained model), or
                - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory, or
                - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
            state_dict: (`optional`) dict:
                An optional state dictionary for the model to use instead of a state dictionary loaded from the saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
            cache_dir: (`optional`) string:
                Path to a directory in which a downloaded pre-trained model configuration should be cached if the standard cache should not be used.
            force_download: (`optional`) boolean, default False:
                Force a (re-)download of the model weights and configuration files, overriding any cached versions.
            resume_download: (`optional`) boolean, default False:
                Do not delete an incompletely received file. Attempt to resume the download if such a file exists.
            proxies: (`optional`) dict, default None:
                A dictionary of proxy servers to use by protocol or endpoint, e.g.: ``{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}``. The proxies are used on each request.
            output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
            kwargs: (`optional`) Remaining dictionary of keyword arguments:
                These arguments will be passed to the configuration and the model.

        Examples::

            model = AutoModelWithLMHead.from_pretrained('bert-base-uncased')  # Download model and configuration from S3 and cache.
            model = AutoModelWithLMHead.from_pretrained('./test/bert_model/')  # E.g. model was saved using `save_pretrained('./test/bert_model/')`
            model = AutoModelWithLMHead.from_pretrained('bert-base-uncased', output_attentions=True)  # Update configuration during loading.
            assert model.config.output_attentions == True
            # Loading from a TF checkpoint file instead of a PyTorch model (slower)
            config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
            model = AutoModelWithLMHead.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)

        """
        warnings.warn(
            "The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use "
            "`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models "
            "and `AutoModelForSeq2SeqLM` for encoder-decoder models.",
            FutureWarning,
        )
        config = kwargs.pop("config", None)
        if not isinstance(config, PretrainedConfig):
            config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)

        for config_class, model_class in MODEL_WITH_LM_HEAD_MAPPING.items():
            if isinstance(config, config_class):
                return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_WITH_LM_HEAD_MAPPING.keys())
            )
        )
class AutoModelForCausalLM: r""" :class:`~transformers.AutoModelForCausalLM` is a generic model class that will be instantiated as one of the language modeling model classes of the library when created with the `AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path)` class method. This class cannot be instantiated using `__init__()` (throws an error). """ def __init__(self): raise EnvironmentError( "AutoModelForCausalLM is designed to be instantiated " "using the `AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path)` or " "`AutoModelForCausalLM.from_config(config)` methods." ) @classmethod def from_config(cls, config): r""" Instantiates one of the base model classes of the library from a configuration. Note: Loading a model from its configuration file does **not** load the model weights. It only affects the model's configuration. Use :func:`~transformers.AutoModel.from_pretrained` to load the model weights Args: config (:class:`~transformers.PretrainedConfig`): The model class to instantiate is selected based on the configuration class: - isInstance of `bert` configuration class: :class:`~transformers.BertLMHeadModel` (Bert model) - isInstance of `openai-gpt` configuration class: :class:`~transformers.OpenAIGPTLMHeadModel` (OpenAI GPT model) - isInstance of `gpt2` configuration class: :class:`~transformers.GPT2LMHeadModel` (OpenAI GPT-2 model) - isInstance of `ctrl` configuration class: :class:`~transformers.CTRLLMHeadModel` (Salesforce CTRL model) - isInstance of `transfo-xl` configuration class: :class:`~transformers.TransfoXLLMHeadModel` (Transformer-XL model) - isInstance of `xlnet` configuration class: :class:`~transformers.XLNetLMHeadModel` (XLNet model) - isInstance of `reformer` configuration class: :class:`~transformers.ReformerModelWithLMHead` (Reformer model) Examples:: config = GPT2Config.from_pretrained('gpt2') # Download configuration from S3 and cache. model = AutoModelForCausalLM.from_config(config) # E.g. model was saved using `save_pretrained('./test/saved_model/')` """ for config_class, model_class in MODEL_FOR_CAUSAL_LM_MAPPING.items(): if isinstance(config, config_class): return model_class(config) raise ValueError( "Unrecognized configuration class {} for this kind of AutoModel: {}.\n" "Model type should be one of {}.".format( config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_CAUSAL_LM_MAPPING.keys()) ) ) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Instantiates one of the language modeling model classes of the library from a pre-trained model configuration. 
The `from_pretrained()` method takes care of returning the correct model class instance based on the `model_type` property of the config object, or when it's missing, falling back to using pattern matching on the `pretrained_model_name_or_path` string: - `bert`: :class:`~transformers.BertLMHeadModel` (Bert model) - `openai-gpt`: :class:`~transformers.OpenAIGPTLMHeadModel` (OpenAI GPT model) - `gpt2`: :class:`~transformers.GPT2LMHeadModel` (OpenAI GPT-2 model) - `transfo-xl`: :class:`~transformers.TransfoXLLMHeadModel` (Transformer-XL model) - `xlnet`: :class:`~transformers.XLNetLMHeadModel` (XLNet model) - `ctrl`: :class:`~transformers.CTRLLMHeadModel` (Salesforce CTRL model) - `reformer`: :class:`~transformers.ReformerModelWithLMHead` (Google Reformer model) The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated) To train the model, you should first set it back in training mode with `model.train()` Args: pretrained_model_name_or_path: Either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``. - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. model_args: (`optional`) Sequence of positional arguments: All remaning positional arguments will be passed to the underlying model's ``__init__`` method config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`: Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when: - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory. - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory. state_dict: (`optional`) dict: an optional state dictionary for the model to use instead of a state dictionary loaded from saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option. cache_dir: (`optional`) string: Path to a directory in which a downloaded pre-trained model configuration should be cached if the standard cache should not be used. force_download: (`optional`) boolean, default False: Force to (re-)download the model weights and configuration files and override the cached versions if they exists. resume_download: (`optional`) boolean, default False: Do not delete incompletely received file. Attempt to resume the download if such a file exists. 
proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. output_loading_info: (`optional`) boolean: Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages. kwargs: (`optional`) Remaining dictionary of keyword arguments: These arguments will be passed to the configuration and the model. Examples:: model = AutoModelForCausalLM.from_pretrained('gpt2') # Download model and configuration from S3 and cache. model = AutoModelForCausalLM.from_pretrained('./test/gpt2_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` assert model.config.output_attention == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_json_file('./tf_model/gpt2_tf_model_config.json') model = AutoModelForCausalLM.from_pretrained('./tf_model/gpt2_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ config = kwargs.pop("config", None) if not isinstance(config, PretrainedConfig): config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs) for config_class, model_class in MODEL_FOR_CAUSAL_LM_MAPPING.items(): if isinstance(config, config_class): return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs) raise ValueError( "Unrecognized configuration class {} for this kind of AutoModel: {}.\n" "Model type should be one of {}.".format( config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_CAUSAL_LM_MAPPING.keys()) ) ) class AutoModelForMaskedLM: r""" :class:`~transformers.AutoModelForMaskedLM` is a generic model class that will be instantiated as one of the language modeling model classes of the library when created with the `AutoModelForMaskedLM.from_pretrained(pretrained_model_name_or_path)` class method. This class cannot be instantiated using `__init__()` (throws an error). """ def __init__(self): raise EnvironmentError( "AutoModelForMaskedLM is designed to be instantiated " "using the `AutoModelForMaskedLM.from_pretrained(pretrained_model_name_or_path)` or " "`AutoModelForMaskedLM.from_config(config)` methods." ) @classmethod def from_config(cls, config): r""" Instantiates one of the base model classes of the library from a configuration. Note: Loading a model from its configuration file does **not** load the model weights. It only affects the model's configuration. 
Use :func:`~transformers.AutoModel.from_pretrained` to load the model weights Args: config (:class:`~transformers.PretrainedConfig`): The model class to instantiate is selected based on the configuration class: - isInstance of `distilbert` configuration class: :class:`~transformers.DistilBertForMaskedLM` (DistilBERT model) - isInstance of `longformer` configuration class: :class:`~transformers.LongformerForMaskedLM` (Longformer model) - isInstance of `roberta` configuration class: :class:`~transformers.RobertaForMaskedLM` (RoBERTa model) - isInstance of `bert` configuration class: :class:`~transformers.BertForMaskedLM` (Bert model) - isInstance of `flaubert` configuration class: :class:`~transformers.FlaubertWithLMHeadModel` (Flaubert model) - isInstance of `xlm` configuration class: :class:`~transformers.XLMWithLMHeadModel` (XLM model) - isInstance of `xlm-roberta` configuration class: :class:`~transformers.XLMRobertaForMaskedLM` (XLM-Roberta model) - isInstance of `electra` configuration class: :class:`~transformers.ElectraForMaskedLM` (Electra model) - isInstance of `camembert` configuration class: :class:`~transformers.CamembertForMaskedLM` (Camembert model) - isInstance of `albert` configuration class: :class:`~transformers.AlbertForMaskedLM` (Albert model) Examples:: config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache. model = AutoModelForMaskedLM.from_config(config) # E.g. model was saved using `save_pretrained('./test/saved_model/')` """ for config_class, model_class in MODEL_FOR_MASKED_LM_MAPPING.items(): if isinstance(config, config_class): return model_class(config) raise ValueError( "Unrecognized configuration class {} for this kind of AutoModel: {}.\n" "Model type should be one of {}.".format( config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_MASKED_LM_MAPPING.keys()) ) ) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Instantiates one of the language modeling model classes of the library from a pre-trained model configuration. The `from_pretrained()` method takes care of returning the correct model class instance based on the `model_type` property of the config object, or when it's missing, falling back to using pattern matching on the `pretrained_model_name_or_path` string: - `distilbert`: :class:`~transformers.DistilBertForMaskedLM` (DistilBERT model) - `albert`: :class:`~transformers.AlbertForMaskedLM` (ALBERT model) - `camembert`: :class:`~transformers.CamembertForMaskedLM` (CamemBERT model) - `xlm-roberta`: :class:`~transformers.XLMRobertaForMaskedLM` (XLM-RoBERTa model) - `longformer`: :class:`~transformers.LongformerForMaskedLM` (Longformer model) - `roberta`: :class:`~transformers.RobertaForMaskedLM` (RoBERTa model) - `xlm`: :class:`~transformers.XLMWithLMHeadModel` (XLM model) - `flaubert`: :class:`~transformers.FlaubertWithLMHeadModel` (Flaubert model) - `electra`: :class:`~transformers.ElectraForMaskedLM` (Electra model) - `bert`: :class:`~transformers.BertLMHeadModel` (Bert model) The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated) To train the model, you should first set it back in training mode with `model.train()` Args: pretrained_model_name_or_path: Either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. 
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``. - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. model_args: (`optional`) Sequence of positional arguments: All remaning positional arguments will be passed to the underlying model's ``__init__`` method config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`: Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when: - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory. - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory. state_dict: (`optional`) dict: an optional state dictionary for the model to use instead of a state dictionary loaded from saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option. cache_dir: (`optional`) string: Path to a directory in which a downloaded pre-trained model configuration should be cached if the standard cache should not be used. force_download: (`optional`) boolean, default False: Force to (re-)download the model weights and configuration files and override the cached versions if they exists. resume_download: (`optional`) boolean, default False: Do not delete incompletely received file. Attempt to resume the download if such a file exists. proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. output_loading_info: (`optional`) boolean: Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages. kwargs: (`optional`) Remaining dictionary of keyword arguments: These arguments will be passed to the configuration and the model. Examples:: model = AutoModelForMaskedLM.from_pretrained('bert') # Download model and configuration from S3 and cache. model = AutoModelForMaskedLM.from_pretrained('./test/bert_model/') # E.g. 
model was saved using `save_pretrained('./test/saved_model/')` assert model.config.output_attention == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') model = AutoModelForMaskedLM.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ config = kwargs.pop("config", None) if not isinstance(config, PretrainedConfig): config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs) for config_class, model_class in MODEL_FOR_MASKED_LM_MAPPING.items(): if isinstance(config, config_class): return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs) raise ValueError( "Unrecognized configuration class {} for this kind of AutoModel: {}.\n" "Model type should be one of {}.".format( config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_MASKED_LM_MAPPING.keys()) ) ) class AutoModelForSeq2SeqLM: r""" :class:`~transformers.AutoModelForSeq2SeqLM` is a generic model class that will be instantiated as one of the language modeling model classes of the library when created with the `AutoModelForSeq2SeqLM.from_pretrained(pretrained_model_name_or_path)` class method. This class cannot be instantiated using `__init__()` (throws an error). """ def __init__(self): raise EnvironmentError( "AutoModelForSeq2SeqLM is designed to be instantiated " "using the `AutoModelForSeq2SeqLM.from_pretrained(pretrained_model_name_or_path)` or " "`AutoModelForSeq2SeqLM.from_config(config)` methods." ) @classmethod def from_config(cls, config): r""" Instantiates one of the base model classes of the library from a configuration. Note: Loading a model from its configuration file does **not** load the model weights. It only affects the model's configuration. Use :func:`~transformers.AutoModel.from_pretrained` to load the model weights Args: config (:class:`~transformers.PretrainedConfig`): The model class to instantiate is selected based on the configuration class: - isInstance of `t5` configuration class: :class:`~transformers.T5ForConditionalGeneration` (T5 model) - isInstance of `bart` configuration class: :class:`~transformers.BartForConditionalGeneration` (Bart model) - isInstance of `marian` configuration class: :class:`~transformers.MarianMTModel` (Marian model) - isInstance of `encoder-decoder` configuration class: :class:`~transformers.EncoderDecoderModel` (Encoder Decoder model) Examples:: config = T5Config.from_pretrained('t5') model = AutoModelForSeq2SeqLM.from_config(config) # E.g. model was saved using `save_pretrained('./test/saved_model/')` """ for config_class, model_class in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.items(): if isinstance(config, config_class): return model_class(config) raise ValueError( "Unrecognized configuration class {} for this kind of AutoModel: {}.\n" "Model type should be one of {}.".format( config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.keys()), ) ) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Instantiates one of the language modeling model classes of the library from a pre-trained model configuration. 
        The `from_pretrained()` method takes care of returning the correct model class instance
        based on the `model_type` property of the config object, or when it's missing,
        falling back to using pattern matching on the `pretrained_model_name_or_path` string:

            - `t5`: :class:`~transformers.T5ForConditionalGeneration` (T5 model)
            - `bart`: :class:`~transformers.BartForConditionalGeneration` (Bart model)
            - `marian`: :class:`~transformers.MarianMTModel` (Marian model)
            - `encoder-decoder`: :class:`~transformers.EncoderDecoderModel` (Encoder Decoder model)

        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated).
        To train the model, you should first set it back in training mode with `model.train()`.

        Args:
            pretrained_model_name_or_path: Either:

                - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
                - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
                - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
                - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint to a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.

            model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method.

            config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:

                - the model is a model provided by the library (loaded with the ``shortcut name`` string of a pretrained model), or
                - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.

            state_dict: (`optional`) dict:
                An optional state dictionary for the model to use instead of a state dictionary loaded from the saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.

            cache_dir: (`optional`) string:
                Path to a directory in which a downloaded pre-trained model configuration should be cached if the standard cache should not be used.

            force_download: (`optional`) boolean, default False:
                Force a (re-)download of the model weights and configuration files, overriding the cached versions if they exist.

            resume_download: (`optional`) boolean, default False:
                Do not delete an incompletely received file. Attempt to resume the download if such a file exists.

            proxies: (`optional`) dict, default None:
                A dictionary of proxy servers to use by protocol or endpoint, e.g.: ``{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}``. The proxies are used on each request.
            output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.

            kwargs: (`optional`) Remaining dictionary of keyword arguments:
                These arguments will be passed to the configuration and the model.

        Examples::

            model = AutoModelForSeq2SeqLM.from_pretrained('t5-base')    # Download model and configuration from S3 and cache.
            model = AutoModelForSeq2SeqLM.from_pretrained('./test/t5_model/')  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
            assert model.config.output_attentions == True
            # Loading from a TF checkpoint file instead of a PyTorch model (slower)
            config = AutoConfig.from_json_file('./tf_model/t5_tf_model_config.json')
            model = AutoModelForSeq2SeqLM.from_pretrained('./tf_model/t5_tf_checkpoint.ckpt.index', from_tf=True, config=config)
        """
        config = kwargs.pop("config", None)
        if not isinstance(config, PretrainedConfig):
            config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)

        for config_class, model_class in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.items():
            if isinstance(config, config_class):
                return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__,
                cls.__name__,
                ", ".join(c.__name__ for c in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.keys()),
            )
        )
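# --- Editor's note: a minimal usage sketch, not part of the original module. It
# assumes the 't5-base' checkpoint and the library's AutoTokenizer class; with a
# T5Config, from_pretrained above returns a T5ForConditionalGeneration, so the
# standard generate() API is available on the result.
def _sketch_seq2seq_translation():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")  # dispatches on T5Config
    input_ids = tokenizer.encode("translate English to German: How are you?", return_tensors="pt")
    generated = model.generate(input_ids, max_length=40)
    return tokenizer.decode(generated[0], skip_special_tokens=True)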
class AutoModelForSequenceClassification:
    r"""
        :class:`~transformers.AutoModelForSequenceClassification` is a generic model class
        that will be instantiated as one of the sequence classification model classes of the library
        when created with the `AutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path)`
        class method.

        This class cannot be instantiated using `__init__()` (throws an error).
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoModelForSequenceClassification is designed to be instantiated "
            "using the `AutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path)` or "
            "`AutoModelForSequenceClassification.from_config(config)` methods."
        )
    @classmethod
    def from_config(cls, config):
        r""" Instantiates one of the sequence classification model classes of the library
        from a configuration.

        Note:
            Loading a model from its configuration file does **not** load the model weights.
            It only affects the model's configuration. Use :func:`~transformers.AutoModel.from_pretrained` to load
            the model weights.

        Args:
            config (:class:`~transformers.PretrainedConfig`):
                The model class to instantiate is selected based on the configuration class:

                - isinstance of `distilbert` configuration class: :class:`~transformers.DistilBertForSequenceClassification` (DistilBERT model)
                - isinstance of `albert` configuration class: :class:`~transformers.AlbertForSequenceClassification` (ALBERT model)
                - isinstance of `camembert` configuration class: :class:`~transformers.CamembertForSequenceClassification` (CamemBERT model)
                - isinstance of `xlm roberta` configuration class: :class:`~transformers.XLMRobertaForSequenceClassification` (XLM-RoBERTa model)
                - isinstance of `roberta` configuration class: :class:`~transformers.RobertaForSequenceClassification` (RoBERTa model)
                - isinstance of `bert` configuration class: :class:`~transformers.BertForSequenceClassification` (Bert model)
                - isinstance of `xlnet` configuration class: :class:`~transformers.XLNetForSequenceClassification` (XLNet model)
                - isinstance of `xlm` configuration class: :class:`~transformers.XLMForSequenceClassification` (XLM model)
                - isinstance of `flaubert` configuration class: :class:`~transformers.FlaubertForSequenceClassification` (Flaubert model)

        Examples::

            config = BertConfig.from_pretrained('bert-base-uncased')    # Download configuration from S3 and cache.
            model = AutoModelForSequenceClassification.from_config(config)  # Model is randomly initialized from the configuration.
        """
        for config_class, model_class in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.items():
            if isinstance(config, config_class):
                return model_class(config)
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__,
                cls.__name__,
                ", ".join(c.__name__ for c in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys()),
            )
        )
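# --- Editor's note: a small sketch (not in the original module) demonstrating the
# Note in from_config above: from_config() builds the architecture only, with
# randomly initialized parameters, while from_pretrained() also loads trained weights.
def _sketch_from_config_is_weightless():
    from transformers import BertConfig

    config = BertConfig.from_pretrained("bert-base-uncased")  # configuration only
    model = AutoModelForSequenceClassification.from_config(config)
    # `model` has the right architecture but untrained weights; use
    # AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
    # instead when the trained parameters are needed.
    return model.num_parameters()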
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r""" Instantiates one of the sequence classification model classes of the library
        from a pre-trained model configuration.

        The `from_pretrained()` method takes care of returning the correct model class instance
        based on the `model_type` property of the config object, or when it's missing,
        falling back to using pattern matching on the `pretrained_model_name_or_path` string:

            - `distilbert`: :class:`~transformers.DistilBertForSequenceClassification` (DistilBERT model)
            - `albert`: :class:`~transformers.AlbertForSequenceClassification` (ALBERT model)
            - `camembert`: :class:`~transformers.CamembertForSequenceClassification` (CamemBERT model)
            - `xlm-roberta`: :class:`~transformers.XLMRobertaForSequenceClassification` (XLM-RoBERTa model)
            - `roberta`: :class:`~transformers.RobertaForSequenceClassification` (RoBERTa model)
            - `bert`: :class:`~transformers.BertForSequenceClassification` (Bert model)
            - `xlnet`: :class:`~transformers.XLNetForSequenceClassification` (XLNet model)
            - `xlm`: :class:`~transformers.XLMForSequenceClassification` (XLM model)
            - `flaubert`: :class:`~transformers.FlaubertForSequenceClassification` (Flaubert model)

        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated).
        To train the model, you should first set it back in training mode with `model.train()`.

        Args:
            pretrained_model_name_or_path: Either:

                - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
                - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
                - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
                - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint to a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.

            model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method.

            config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:

                - the model is a model provided by the library (loaded with the ``shortcut name`` string of a pretrained model), or
                - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.

            state_dict: (`optional`) dict:
                An optional state dictionary for the model to use instead of a state dictionary loaded from the saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
            cache_dir: (`optional`) string:
                Path to a directory in which a downloaded pre-trained model configuration should be cached if the standard cache should not be used.

            force_download: (`optional`) boolean, default False:
                Force a (re-)download of the model weights and configuration files, overriding the cached versions if they exist.

            resume_download: (`optional`) boolean, default False:
                Do not delete an incompletely received file. Attempt to resume the download if such a file exists.

            proxies: (`optional`) dict, default None:
                A dictionary of proxy servers to use by protocol or endpoint, e.g.: ``{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}``. The proxies are used on each request.

            output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.

            kwargs: (`optional`) Remaining dictionary of keyword arguments:
                These arguments will be passed to the configuration and the model.

        Examples::

            model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased')    # Download model and configuration from S3 and cache.
            model = AutoModelForSequenceClassification.from_pretrained('./test/bert_model/')  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
            assert model.config.output_attentions == True
            # Loading from a TF checkpoint file instead of a PyTorch model (slower)
            config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
            model = AutoModelForSequenceClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
        """
        config = kwargs.pop("config", None)
        if not isinstance(config, PretrainedConfig):
            config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)

        for config_class, model_class in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.items():
            if isinstance(config, config_class):
                return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__,
                cls.__name__,
                ", ".join(c.__name__ for c in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys()),
            )
        )
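# --- Editor's note: an illustrative classification forward pass, not part of the
# original module. AutoTokenizer and the 'bert-base-uncased' checkpoint are assumed;
# in this library version models return tuples, so the logits are the first element.
def _sketch_sequence_classification():
    import torch
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
    inputs = tokenizer("This library is a joy to use.", return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs)[0]  # shape: (batch_size, num_labels)
    return logits.argmax(dim=-1)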
class AutoModelForQuestionAnswering:
    r"""
        :class:`~transformers.AutoModelForQuestionAnswering` is a generic model class
        that will be instantiated as one of the question answering model classes of the library
        when created with the `AutoModelForQuestionAnswering.from_pretrained(pretrained_model_name_or_path)`
        class method.

        This class cannot be instantiated using `__init__()` (throws an error).
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoModelForQuestionAnswering is designed to be instantiated "
            "using the `AutoModelForQuestionAnswering.from_pretrained(pretrained_model_name_or_path)` or "
            "`AutoModelForQuestionAnswering.from_config(config)` methods."
        )
    @classmethod
    def from_config(cls, config):
        r""" Instantiates one of the question answering model classes of the library
        from a configuration.

        Note:
            Loading a model from its configuration file does **not** load the model weights.
            It only affects the model's configuration. Use :func:`~transformers.AutoModel.from_pretrained` to load
            the model weights.

        Args:
            config (:class:`~transformers.PretrainedConfig`):
                The model class to instantiate is selected based on the configuration class:

                - isinstance of `distilbert` configuration class: :class:`~transformers.DistilBertForQuestionAnswering` (DistilBERT model)
                - isinstance of `albert` configuration class: :class:`~transformers.AlbertForQuestionAnswering` (ALBERT model)
                - isinstance of `bert` configuration class: :class:`~transformers.BertForQuestionAnswering` (Bert model)
                - isinstance of `xlnet` configuration class: :class:`~transformers.XLNetForQuestionAnswering` (XLNet model)
                - isinstance of `xlm` configuration class: :class:`~transformers.XLMForQuestionAnswering` (XLM model)
                - isinstance of `flaubert` configuration class: :class:`~transformers.FlaubertForQuestionAnsweringSimple` (Flaubert model)

        Examples::

            config = BertConfig.from_pretrained('bert-base-uncased')    # Download configuration from S3 and cache.
            model = AutoModelForQuestionAnswering.from_config(config)  # Model is randomly initialized from the configuration.
        """
        for config_class, model_class in MODEL_FOR_QUESTION_ANSWERING_MAPPING.items():
            if isinstance(config, config_class):
                return model_class(config)
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__,
                cls.__name__,
                ", ".join(c.__name__ for c in MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()),
            )
        )
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r""" Instantiates one of the question answering model classes of the library
        from a pre-trained model configuration.

        The `from_pretrained()` method takes care of returning the correct model class instance
        based on the `model_type` property of the config object, or when it's missing,
        falling back to using pattern matching on the `pretrained_model_name_or_path` string:

            - `distilbert`: :class:`~transformers.DistilBertForQuestionAnswering` (DistilBERT model)
            - `albert`: :class:`~transformers.AlbertForQuestionAnswering` (ALBERT model)
            - `bert`: :class:`~transformers.BertForQuestionAnswering` (Bert model)
            - `xlnet`: :class:`~transformers.XLNetForQuestionAnswering` (XLNet model)
            - `xlm`: :class:`~transformers.XLMForQuestionAnswering` (XLM model)
            - `flaubert`: :class:`~transformers.FlaubertForQuestionAnsweringSimple` (Flaubert model)

        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated).
        To train the model, you should first set it back in training mode with `model.train()`.

        Args:
            pretrained_model_name_or_path: Either:

                - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
                - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
                - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
                - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint to a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.

            model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method.

            config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:

                - the model is a model provided by the library (loaded with the ``shortcut name`` string of a pretrained model), or
                - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.

            state_dict: (`optional`) dict:
                An optional state dictionary for the model to use instead of a state dictionary loaded from the saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.

            cache_dir: (`optional`) string:
                Path to a directory in which a downloaded pre-trained model configuration should be cached if the standard cache should not be used.

            force_download: (`optional`) boolean, default False:
                Force a (re-)download of the model weights and configuration files, overriding the cached versions if they exist.

            resume_download: (`optional`) boolean, default False:
                Do not delete an incompletely received file. Attempt to resume the download if such a file exists.
            proxies: (`optional`) dict, default None:
                A dictionary of proxy servers to use by protocol or endpoint, e.g.: ``{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}``. The proxies are used on each request.

            output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.

            kwargs: (`optional`) Remaining dictionary of keyword arguments:
                These arguments will be passed to the configuration and the model.

        Examples::

            model = AutoModelForQuestionAnswering.from_pretrained('bert-base-uncased')    # Download model and configuration from S3 and cache.
            model = AutoModelForQuestionAnswering.from_pretrained('./test/bert_model/')  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
            assert model.config.output_attentions == True
            # Loading from a TF checkpoint file instead of a PyTorch model (slower)
            config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
            model = AutoModelForQuestionAnswering.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
        """
        config = kwargs.pop("config", None)
        if not isinstance(config, PretrainedConfig):
            config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)

        for config_class, model_class in MODEL_FOR_QUESTION_ANSWERING_MAPPING.items():
            if isinstance(config, config_class):
                return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__,
                cls.__name__,
                ", ".join(c.__name__ for c in MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()),
            )
        )
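# --- Editor's note: an extractive QA sketch, not part of the original module. It
# assumes AutoTokenizer and a QA-finetuned checkpoint (plain 'bert-base-uncased'
# would load an untrained QA head). The model returns (start_logits, end_logits).
def _sketch_extractive_qa():
    import torch
    from transformers import AutoTokenizer

    checkpoint = "bert-large-uncased-whole-word-masking-finetuned-squad"
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForQuestionAnswering.from_pretrained(checkpoint)
    question, context = "Where is the company based?", "The company is based in New York City."
    inputs = tokenizer(question, context, return_tensors="pt")
    with torch.no_grad():
        start_logits, end_logits = model(**inputs)[:2]
    start, end = int(start_logits.argmax()), int(end_logits.argmax()) + 1
    return tokenizer.decode(inputs["input_ids"][0][start:end].tolist())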
class AutoModelForTokenClassification:
    r"""
        :class:`~transformers.AutoModelForTokenClassification` is a generic model class
        that will be instantiated as one of the token classification model classes of the library
        when created with the `AutoModelForTokenClassification.from_pretrained(pretrained_model_name_or_path)`
        class method.

        This class cannot be instantiated using `__init__()` (throws an error).
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoModelForTokenClassification is designed to be instantiated "
            "using the `AutoModelForTokenClassification.from_pretrained(pretrained_model_name_or_path)` or "
            "`AutoModelForTokenClassification.from_config(config)` methods."
        )
    @classmethod
    def from_config(cls, config):
        r""" Instantiates one of the token classification model classes of the library
        from a configuration.

        Note:
            Loading a model from its configuration file does **not** load the model weights.
            It only affects the model's configuration. Use :func:`~transformers.AutoModel.from_pretrained` to load
            the model weights.

        Args:
            config (:class:`~transformers.PretrainedConfig`):
                The model class to instantiate is selected based on the configuration class:

                - isinstance of `distilbert` configuration class: :class:`~transformers.DistilBertForTokenClassification` (DistilBERT model)
                - isinstance of `xlm` configuration class: :class:`~transformers.XLMForTokenClassification` (XLM model)
                - isinstance of `xlm roberta` configuration class: :class:`~transformers.XLMRobertaForTokenClassification` (XLM-RoBERTa model)
                - isinstance of `bert` configuration class: :class:`~transformers.BertForTokenClassification` (Bert model)
                - isinstance of `albert` configuration class: :class:`~transformers.AlbertForTokenClassification` (ALBERT model)
                - isinstance of `xlnet` configuration class: :class:`~transformers.XLNetForTokenClassification` (XLNet model)
                - isinstance of `camembert` configuration class: :class:`~transformers.CamembertForTokenClassification` (CamemBERT model)
                - isinstance of `roberta` configuration class: :class:`~transformers.RobertaForTokenClassification` (RoBERTa model)
                - isinstance of `electra` configuration class: :class:`~transformers.ElectraForTokenClassification` (Electra model)

        Examples::

            config = BertConfig.from_pretrained('bert-base-uncased')    # Download configuration from S3 and cache.
            model = AutoModelForTokenClassification.from_config(config)  # Model is randomly initialized from the configuration.
        """
        for config_class, model_class in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items():
            if isinstance(config, config_class):
                return model_class(config)
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__,
                cls.__name__,
                ", ".join(c.__name__ for c in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys()),
            )
        )
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r""" Instantiates one of the token classification model classes of the library
        from a pre-trained model configuration.

        The `from_pretrained()` method takes care of returning the correct model class instance
        based on the `model_type` property of the config object, or when it's missing,
        falling back to using pattern matching on the `pretrained_model_name_or_path` string:

            - `distilbert`: :class:`~transformers.DistilBertForTokenClassification` (DistilBERT model)
            - `xlm`: :class:`~transformers.XLMForTokenClassification` (XLM model)
            - `xlm-roberta`: :class:`~transformers.XLMRobertaForTokenClassification` (XLM-RoBERTa model)
            - `camembert`: :class:`~transformers.CamembertForTokenClassification` (CamemBERT model)
            - `bert`: :class:`~transformers.BertForTokenClassification` (Bert model)
            - `xlnet`: :class:`~transformers.XLNetForTokenClassification` (XLNet model)
            - `roberta`: :class:`~transformers.RobertaForTokenClassification` (RoBERTa model)
            - `electra`: :class:`~transformers.ElectraForTokenClassification` (Electra model)

        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated).
        To train the model, you should first set it back in training mode with `model.train()`.

        Args:
            pretrained_model_name_or_path: Either:

                - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
                - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
                - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
                - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint to a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.

            model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method.

            config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:

                - the model is a model provided by the library (loaded with the ``shortcut name`` string of a pretrained model), or
                - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.

            state_dict: (`optional`) dict:
                An optional state dictionary for the model to use instead of a state dictionary loaded from the saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.

            cache_dir: (`optional`) string:
                Path to a directory in which a downloaded pre-trained model configuration should be cached if the standard cache should not be used.
            force_download: (`optional`) boolean, default False:
                Force a (re-)download of the model weights and configuration files, overriding the cached versions if they exist.

            resume_download: (`optional`) boolean, default False:
                Do not delete an incompletely received file. Attempt to resume the download if such a file exists.

            proxies: (`optional`) dict, default None:
                A dictionary of proxy servers to use by protocol or endpoint, e.g.: ``{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}``. The proxies are used on each request.

            output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.

            kwargs: (`optional`) Remaining dictionary of keyword arguments:
                These arguments will be passed to the configuration and the model.

        Examples::

            model = AutoModelForTokenClassification.from_pretrained('bert-base-uncased')    # Download model and configuration from S3 and cache.
            model = AutoModelForTokenClassification.from_pretrained('./test/bert_model/')  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
            assert model.config.output_attentions == True
            # Loading from a TF checkpoint file instead of a PyTorch model (slower)
            config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
            model = AutoModelForTokenClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
        """
        config = kwargs.pop("config", None)
        if not isinstance(config, PretrainedConfig):
            config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)

        for config_class, model_class in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items():
            if isinstance(config, config_class):
                return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__,
                cls.__name__,
                ", ".join(c.__name__ for c in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys()),
            )
        )
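# --- Editor's note: a token classification sketch, not part of the original module.
# AutoTokenizer and the 'bert-base-uncased' checkpoint are assumed; the head loaded
# here is untrained, so the predicted label ids are only meaningful after fine-tuning.
def _sketch_token_classification():
    import torch
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    model = AutoModelForTokenClassification.from_pretrained("bert-base-uncased")
    inputs = tokenizer("HuggingFace is based in New York City", return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs)[0]  # shape: (batch_size, sequence_length, num_labels)
    return logits.argmax(dim=-1)  # one label id per input token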
class AutoModelForMultipleChoice:
    r"""
        :class:`~transformers.AutoModelForMultipleChoice` is a generic model class
        that will be instantiated as one of the multiple choice model classes of the library
        when created with the `AutoModelForMultipleChoice.from_pretrained(pretrained_model_name_or_path)`
        class method.

        This class cannot be instantiated using `__init__()` (throws an error).
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoModelForMultipleChoice is designed to be instantiated "
            "using the `AutoModelForMultipleChoice.from_pretrained(pretrained_model_name_or_path)` or "
            "`AutoModelForMultipleChoice.from_config(config)` methods."
        )

    @classmethod
    def from_config(cls, config):
        # Instantiate the multiple choice model class matching the configuration type.
        # As with the other Auto classes, this builds the architecture only and does
        # not load any pretrained weights.
        for config_class, model_class in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.items():
            if isinstance(config, config_class):
                return model_class(config)
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__,
                cls.__name__,
                ", ".join(c.__name__ for c in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.keys()),
            )
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        # Resolve the configuration first (downloading it if necessary), then delegate
        # to the from_pretrained method of the matching model class.
        config = kwargs.pop("config", None)
        if not isinstance(config, PretrainedConfig):
            config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)

        for config_class, model_class in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.items():
            if isinstance(config, config_class):
                return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__,
                cls.__name__,
                ", ".join(c.__name__ for c in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.keys()),
            )
        )
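# --- Editor's note: since AutoModelForMultipleChoice ships without a docstring here,
# this is a usage sketch, not part of the original module. AutoTokenizer and the
# 'bert-base-uncased' checkpoint are assumed; multiple choice models expect input of
# shape (batch_size, num_choices, sequence_length) and return one logit per choice.
def _sketch_multiple_choice():
    import torch
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    model = AutoModelForMultipleChoice.from_pretrained("bert-base-uncased")
    prompt = "The sky is"
    choices = ["blue.", "made of cheese."]
    # Encode each (prompt, choice) pair and pad to a common length.
    encoded = [tokenizer.encode(prompt, choice) for choice in choices]
    longest = max(len(ids) for ids in encoded)
    padded = [ids + [tokenizer.pad_token_id] * (longest - len(ids)) for ids in encoded]
    input_ids = torch.tensor(padded).unsqueeze(0)  # (1, num_choices, sequence_length)
    with torch.no_grad():
        logits = model(input_ids=input_ids)[0]  # (1, num_choices)
    return logits.argmax(dim=-1)  # index of the highest-scoring choice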