# Translations template for the PaddleNLP RoBERTa modeling API docs.
# Copyright (C) 2021, PaddleNLP
# This file is distributed under the same license as the PaddleNLP package.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2022.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: PaddleNLP\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2022-05-19 14:17+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.10.1\n"

#: ../source/paddlenlp.transformers.roberta.modeling.rst:2
msgid "modeling"
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaForCausalLM:1
#: paddlenlp.transformers.roberta.modeling.RobertaForMaskedLM:1
#: paddlenlp.transformers.roberta.modeling.RobertaForMultipleChoice:1
#: paddlenlp.transformers.roberta.modeling.RobertaForQuestionAnswering:1
#: paddlenlp.transformers.roberta.modeling.RobertaForSequenceClassification:1
#: paddlenlp.transformers.roberta.modeling.RobertaForTokenClassification:1
#: paddlenlp.transformers.roberta.modeling.RobertaModel:1
msgid "基类：:class:`paddlenlp.transformers.roberta.modeling.RobertaPretrainedModel`"
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel:1
msgid "The bare Roberta Model outputting raw hidden-states."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel:3
msgid ""
"This model inherits from "
":class:`~paddlenlp.transformers.model_utils.PretrainedModel`. Refer to "
"the superclass documentation for the generic methods."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel:6
msgid ""
"This model is also a Paddle `paddle.nn.Layer "
"<https://www.paddlepaddle.org.cn/documentation "
"/docs/en/api/paddle/fluid/dygraph/layers/Layer_en.html>`__ subclass. Use "
"it as a regular Paddle Layer and refer to the Paddle documentation for "
"all matter related to general usage and behavior."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaForCausalLM
#: paddlenlp.transformers.roberta.modeling.RobertaForCausalLM.forward
#: paddlenlp.transformers.roberta.modeling.RobertaForMaskedLM
#: paddlenlp.transformers.roberta.modeling.RobertaForMaskedLM.forward
#: paddlenlp.transformers.roberta.modeling.RobertaForMultipleChoice.forward
#: paddlenlp.transformers.roberta.modeling.RobertaForQuestionAnswering
#: paddlenlp.transformers.roberta.modeling.RobertaForQuestionAnswering.forward
#: paddlenlp.transformers.roberta.modeling.RobertaForSequenceClassification
#: paddlenlp.transformers.roberta.modeling.RobertaForSequenceClassification.forward
#: paddlenlp.transformers.roberta.modeling.RobertaForTokenClassification
#: paddlenlp.transformers.roberta.modeling.RobertaForTokenClassification.forward
#: paddlenlp.transformers.roberta.modeling.RobertaModel
#: paddlenlp.transformers.roberta.modeling.RobertaModel.forward
msgid "参数"
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel:10
msgid ""
"Vocabulary size of `inputs_ids` in `RobertaModel`. Also is the vocab size"
" of token embedding matrix. Defines the number of different tokens that "
"can be represented by the `inputs_ids` passed when calling "
"`RobertaModel`."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel:13
msgid ""
"Dimensionality of the embedding layer, encoder layers and pooler layer. "
"Defaults to `768`."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel:15
msgid "Number of hidden layers in the Transformer encoder. Defaults to `12`."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel:17
msgid ""
"Number of attention heads for each attention layer in the Transformer "
"encoder. Defaults to `12`."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel:20
msgid ""
"Dimensionality of the feed-forward (ff) layer in the encoder. Input "
"tensors to ff layers are firstly projected from `hidden_size` to "
"`intermediate_size`, and then projected back to `hidden_size`. Typically "
"`intermediate_size` is larger than `hidden_size`. Defaults to `3072`."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel:25
msgid ""
"The non-linear activation function in the feed-forward layer. "
"``\"gelu\"``, ``\"relu\"`` and any other paddle supported activation "
"functions are supported. Defaults to ``\"gelu\"``."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel:29
msgid ""
"The dropout probability for all fully connected layers in the embeddings "
"and encoder. Defaults to `0.1`."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel:32
msgid ""
"The dropout probability used in MultiHeadAttention in all encoder layers "
"to drop some attention target. Defaults to `0.1`."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel:35
msgid ""
"The maximum value of the dimensionality of position encoding, which "
"dictates the maximum supported length of an input sequence. Defaults to "
"`512`."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel:38
msgid ""
"The vocabulary size of the `token_type_ids` passed when calling "
"`~transformers.RobertaModel`. Defaults to `2`."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel:41
msgid ""
"The standard deviation of the normal initializer. Defaults to 0.02.  .. "
"note::     A normal_initializer initializes weight matrices as normal "
"distributions.     See :meth:`RobertaPretrainedModel._init_weights()` for"
" how weights are initialized in `RobertaModel`."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel:41
msgid "The standard deviation of the normal initializer. Defaults to 0.02."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel:44
msgid ""
"A normal_initializer initializes weight matrices as normal distributions."
" See :meth:`RobertaPretrainedModel._init_weights()` for how weights are "
"initialized in `RobertaModel`."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel:47
msgid "The index of padding token in the token vocabulary. Defaults to `0`."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel:50
msgid "The index of cls token in the token vocabulary. Defaults to `101`."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel.forward:1
msgid ""
"Indices of input sequence tokens in the vocabulary. They are numerical "
"representations of tokens that build the input sequence. It's data type "
"should be `int64` and has a shape of [batch_size, sequence_length]."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel.forward:5
msgid ""
"Segment token indices to indicate first and second portions of the "
"inputs. Indices can be either 0 or 1:  - 0 corresponds to a **sentence "
"A** token, - 1 corresponds to a **sentence B** token.  It's data type "
"should be `int64` and has a shape of [batch_size, sequence_length]. "
"Defaults to None, which means no segment embeddings is added to token "
"embeddings."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel.forward:5
msgid ""
"Segment token indices to indicate first and second portions of the "
"inputs. Indices can be either 0 or 1:"
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel.forward:8
msgid "0 corresponds to a **sentence A** token,"
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel.forward:9
msgid "1 corresponds to a **sentence B** token."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel.forward:11
msgid ""
"It's data type should be `int64` and has a shape of [batch_size, "
"sequence_length]. Defaults to None, which means no segment embeddings is "
"added to token embeddings."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel.forward:14
msgid ""
"Indices of positions of each input sequence tokens in the position "
"embeddings. Selected in the range ``[0, max_position_embeddings - 1]``. "
"It's data type should be `int64` and has a shape of [batch_size, "
"sequence_length]. Defaults to `None`."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel.forward:19
msgid ""
"Mask used in multi-head attention to avoid performing attention to some "
"unwanted positions, usually the paddings or the subsequent positions. Its"
" data type can be int, float and bool. When the data type is bool, the "
"`masked` tokens have `False` values and the others have `True` values. "
"When the data type is int, the `masked` tokens have `0` values and the "
"others have `1` values. When the data type is float, the `masked` tokens "
"have `-INF` values and the others have `0` values. It is a tensor with "
"shape broadcasted to `[batch_size, num_attention_heads, sequence_length, "
"sequence_length]`. For example, its shape can be  [batch_size, "
"sequence_length], [batch_size, sequence_length, sequence_length], "
"[batch_size, num_attention_heads, sequence_length, sequence_length]. "
"Defaults to `None`, which means nothing needed to be prevented attention "
"to."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel.forward:30
msgid ""
"Whether or not to output hidden states for all hidden layers. Defaults to"
" `False`."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaForCausalLM.forward
#: paddlenlp.transformers.roberta.modeling.RobertaForMaskedLM.forward
#: paddlenlp.transformers.roberta.modeling.RobertaForQuestionAnswering.forward
#: paddlenlp.transformers.roberta.modeling.RobertaForSequenceClassification.forward
#: paddlenlp.transformers.roberta.modeling.RobertaForTokenClassification.forward
#: paddlenlp.transformers.roberta.modeling.RobertaModel.forward
msgid "返回"
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel.forward:34
msgid ""
"Returns tuple (`sequence_output`, `pooled_output`) by default. Returns "
"(`encoder_outputs`, `pooled_output`) if output_hidden_states is `True`.  "
"With the fields:  - `sequence_output` (Tensor):     Sequence of hidden-"
"states at the last layer of the model.     It's data type should be "
"float32 and its shape is [batch_size, sequence_length, hidden_size].  - "
"`pooled_output` (Tensor):     The output of first token (`[CLS]`) in "
"sequence.     We \"pool\" the model by simply taking the hidden state "
"corresponding to the first token.     Its data type should be float32 and"
" its shape is [batch_size, hidden_size].  - `encoder_outputs` "
"(List(Tensor)):     A list of Tensor containing hidden-states of the "
"model at each hidden layer in the Transformer encoder.     The length of "
"the list is `num_hidden_layers`.     Each Tensor has a data type of "
"float32 and its shape is [batch_size, sequence_length, hidden_size]."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel.forward:34
msgid ""
"Returns tuple (`sequence_output`, `pooled_output`) by default. Returns "
"(`encoder_outputs`, `pooled_output`) if output_hidden_states is `True`."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaForCausalLM.forward:15
#: paddlenlp.transformers.roberta.modeling.RobertaForMaskedLM.forward:15
#: paddlenlp.transformers.roberta.modeling.RobertaForQuestionAnswering.forward:15
#: paddlenlp.transformers.roberta.modeling.RobertaForSequenceClassification.forward:15
#: paddlenlp.transformers.roberta.modeling.RobertaForTokenClassification.forward:15
#: paddlenlp.transformers.roberta.modeling.RobertaModel.forward:37
msgid "With the fields:"
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel.forward:41
msgid "`sequence_output` (Tensor):"
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel.forward:40
msgid ""
"Sequence of hidden-states at the last layer of the model. It's data type "
"should be float32 and its shape is [batch_size, sequence_length, "
"hidden_size]."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel.forward:46
msgid "`pooled_output` (Tensor):"
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel.forward:44
msgid ""
"The output of first token (`[CLS]`) in sequence. We \"pool\" the model by"
" simply taking the hidden state corresponding to the first token. Its "
"data type should be float32 and its shape is [batch_size, hidden_size]."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaForCausalLM.forward:23
#: paddlenlp.transformers.roberta.modeling.RobertaForMaskedLM.forward:23
#: paddlenlp.transformers.roberta.modeling.RobertaForQuestionAnswering.forward:27
#: paddlenlp.transformers.roberta.modeling.RobertaForSequenceClassification.forward:23
#: paddlenlp.transformers.roberta.modeling.RobertaForTokenClassification.forward:23
#: paddlenlp.transformers.roberta.modeling.RobertaModel.forward:50
msgid "`encoder_outputs` (List(Tensor)):"
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaModel.forward:49
msgid ""
"A list of Tensor containing hidden-states of the model at each hidden "
"layer in the Transformer encoder. The length of the list is "
"`num_hidden_layers`. Each Tensor has a data type of float32 and its shape"
" is [batch_size, sequence_length, hidden_size]."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaForCausalLM.forward
#: paddlenlp.transformers.roberta.modeling.RobertaForMaskedLM.forward
#: paddlenlp.transformers.roberta.modeling.RobertaForQuestionAnswering.forward
#: paddlenlp.transformers.roberta.modeling.RobertaForSequenceClassification.forward
#: paddlenlp.transformers.roberta.modeling.RobertaForTokenClassification.forward
#: paddlenlp.transformers.roberta.modeling.RobertaModel.forward
msgid "返回类型"
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaForCausalLM.forward:28
#: paddlenlp.transformers.roberta.modeling.RobertaForMaskedLM.forward:28
#: paddlenlp.transformers.roberta.modeling.RobertaForQuestionAnswering.forward:32
#: paddlenlp.transformers.roberta.modeling.RobertaForSequenceClassification.forward:28
#: paddlenlp.transformers.roberta.modeling.RobertaForTokenClassification.forward:28
#: paddlenlp.transformers.roberta.modeling.RobertaModel.forward:55
msgid "示例"
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaPretrainedModel:1
msgid "基类：:class:`paddlenlp.transformers.model_utils.PretrainedModel`"
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaPretrainedModel:1
msgid ""
"An abstract class for pretrained RoBerta models. It provides RoBerta "
"related `model_config_file`, `pretrained_init_configuration`, "
"`resource_files_names`, `pretrained_resource_files_map`, "
"`base_model_prefix` for downloading and loading pretrained models. See "
":class:`~paddlenlp.transformers.model_utils.PretrainedModel` for more "
"details."
msgstr ""

#: of
#: paddlenlp.transformers.roberta.modeling.RobertaPretrainedModel.init_weights:1
msgid "Initialization hook"
msgstr ""

#: of
#: paddlenlp.transformers.roberta.modeling.RobertaForSequenceClassification:1
msgid ""
"Roberta Model with a linear layer on top of the output layer, designed "
"for sequence classification/regression tasks like GLUE tasks."
msgstr ""

#: of
#: paddlenlp.transformers.roberta.modeling.RobertaForSequenceClassification:4
#: paddlenlp.transformers.roberta.modeling.RobertaForTokenClassification:4
msgid "An instance of `RobertaModel`."
msgstr ""

#: of
#: paddlenlp.transformers.roberta.modeling.RobertaForSequenceClassification:6
#: paddlenlp.transformers.roberta.modeling.RobertaForTokenClassification:6
msgid "The number of classes. Defaults to `2`."
msgstr ""

#: of
#: paddlenlp.transformers.roberta.modeling.RobertaForSequenceClassification:8
#: paddlenlp.transformers.roberta.modeling.RobertaForTokenClassification:8
msgid ""
"The dropout probability for output of Roberta. If None, use the same "
"value as `hidden_dropout_prob` of `RobertaModel` instance `roberta`. "
"Defaults to `None`."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaForCausalLM.forward:1
#: paddlenlp.transformers.roberta.modeling.RobertaForCausalLM.forward:3
#: paddlenlp.transformers.roberta.modeling.RobertaForCausalLM.forward:5
#: paddlenlp.transformers.roberta.modeling.RobertaForCausalLM.forward:7
#: paddlenlp.transformers.roberta.modeling.RobertaForCausalLM.forward:9
#: paddlenlp.transformers.roberta.modeling.RobertaForMaskedLM.forward:1
#: paddlenlp.transformers.roberta.modeling.RobertaForMaskedLM.forward:3
#: paddlenlp.transformers.roberta.modeling.RobertaForMaskedLM.forward:5
#: paddlenlp.transformers.roberta.modeling.RobertaForMaskedLM.forward:7
#: paddlenlp.transformers.roberta.modeling.RobertaForMaskedLM.forward:9
#: paddlenlp.transformers.roberta.modeling.RobertaForQuestionAnswering.forward:1
#: paddlenlp.transformers.roberta.modeling.RobertaForQuestionAnswering.forward:3
#: paddlenlp.transformers.roberta.modeling.RobertaForQuestionAnswering.forward:5
#: paddlenlp.transformers.roberta.modeling.RobertaForQuestionAnswering.forward:7
#: paddlenlp.transformers.roberta.modeling.RobertaForQuestionAnswering.forward:9
#: paddlenlp.transformers.roberta.modeling.RobertaForSequenceClassification.forward:1
#: paddlenlp.transformers.roberta.modeling.RobertaForSequenceClassification.forward:3
#: paddlenlp.transformers.roberta.modeling.RobertaForSequenceClassification.forward:5
#: paddlenlp.transformers.roberta.modeling.RobertaForSequenceClassification.forward:7
#: paddlenlp.transformers.roberta.modeling.RobertaForSequenceClassification.forward:9
#: paddlenlp.transformers.roberta.modeling.RobertaForTokenClassification.forward:1
#: paddlenlp.transformers.roberta.modeling.RobertaForTokenClassification.forward:3
#: paddlenlp.transformers.roberta.modeling.RobertaForTokenClassification.forward:5
#: paddlenlp.transformers.roberta.modeling.RobertaForTokenClassification.forward:7
#: paddlenlp.transformers.roberta.modeling.RobertaForTokenClassification.forward:9
msgid "See :class:`RobertaModel`."
msgstr ""

#: of
#: paddlenlp.transformers.roberta.modeling.RobertaForSequenceClassification.forward:12
msgid ""
"Returns tensor `logits` by default. Returns tuple (`logits`, "
"`encoder_outputs`) if output_hidden_states is set to `True`.  With the "
"fields:  - `logits` (Tensor):     a tensor of the input text "
"classification logits.     Its data type should be float32 and it has a "
"shape of [batch_size, num_classes].  - `encoder_outputs` (List(Tensor)):"
"     A list of Tensor containing hidden-states of the model at each "
"hidden layer in the Transformer encoder.     The length of the list is "
"`num_hidden_layers`.     Each Tensor has a data type of float32 and a "
"shape of [batch_size, sequence_length, hidden_size]."
msgstr ""

#: of
#: paddlenlp.transformers.roberta.modeling.RobertaForSequenceClassification.forward:12
#: paddlenlp.transformers.roberta.modeling.RobertaForTokenClassification.forward:12
msgid ""
"Returns tensor `logits` by default. Returns tuple (`logits`, "
"`encoder_outputs`) if output_hidden_states is set to `True`."
msgstr ""

#: of
#: paddlenlp.transformers.roberta.modeling.RobertaForSequenceClassification.forward:19
#: paddlenlp.transformers.roberta.modeling.RobertaForTokenClassification.forward:19
msgid "`logits` (Tensor):"
msgstr ""

#: of
#: paddlenlp.transformers.roberta.modeling.RobertaForSequenceClassification.forward:18
msgid ""
"a tensor of the input text classification logits. Its data type should be"
" float32 and it has a shape of [batch_size, num_classes]."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaForCausalLM.forward:22
#: paddlenlp.transformers.roberta.modeling.RobertaForMaskedLM.forward:22
#: paddlenlp.transformers.roberta.modeling.RobertaForQuestionAnswering.forward:26
#: paddlenlp.transformers.roberta.modeling.RobertaForSequenceClassification.forward:22
#: paddlenlp.transformers.roberta.modeling.RobertaForTokenClassification.forward:22
msgid ""
"A list of Tensor containing hidden-states of the model at each hidden "
"layer in the Transformer encoder. The length of the list is "
"`num_hidden_layers`. Each Tensor has a data type of float32 and a shape "
"of [batch_size, sequence_length, hidden_size]."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaForTokenClassification:1
msgid ""
"Roberta Model with a linear layer on top of the hidden-states output "
"layer, designed for token classification tasks like NER tasks."
msgstr ""

#: of
#: paddlenlp.transformers.roberta.modeling.RobertaForTokenClassification.forward:12
msgid ""
"Returns tensor `logits` by default. Returns tuple (`logits`, "
"`encoder_outputs`) if output_hidden_states is set to `True`.  With the "
"fields:  - `logits` (Tensor):     a tensor of the input token "
"classification logits.     Shape as `[batch_size, sequence_length, "
"num_classes]` and dtype as `float32`.  - `encoder_outputs` "
"(List(Tensor)):     A list of Tensor containing hidden-states of the "
"model at each hidden layer in the Transformer encoder.     The length of "
"the list is `num_hidden_layers`.     Each Tensor has a data type of "
"float32 and a shape of [batch_size, sequence_length, hidden_size]."
msgstr ""

#: of
#: paddlenlp.transformers.roberta.modeling.RobertaForTokenClassification.forward:18
msgid ""
"a tensor of the input token classification logits. Shape as `[batch_size,"
" sequence_length, num_classes]` and dtype as `float32`."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaForQuestionAnswering:2
msgid ""
"Roberta Model with a linear layer on top of the hidden-states output to "
"compute `span_start_logits`"
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaForQuestionAnswering:2
msgid "and `span_end_logits`, designed for question-answering tasks like SQuAD."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaForQuestionAnswering:4
msgid "An instance of RobertaModel."
msgstr ""

#: of
#: paddlenlp.transformers.roberta.modeling.RobertaForQuestionAnswering.forward:12
msgid ""
"Returns tuple (`start_logits`, `end_logits`) by default if "
"output_hidden_states is `False`. Returns tuple (`start_logits`, "
"`end_logits`, `encoder_outputs`) if output_hidden_states is set to "
"`True`.  With the fields:  - `start_logits` (Tensor):     A tensor of the"
" input token classification logits, indicates the start position of the "
"labelled span.     Its data type should be float32 and its shape is "
"[batch_size, sequence_length].  - `end_logits` (Tensor):     A tensor of "
"the input token classification logits, indicates the end position of the "
"labelled span.     Its data type should be float32 and its shape is "
"[batch_size, sequence_length].  - `encoder_outputs` (List(Tensor)):     A"
" list of Tensor containing hidden-states of the model at each hidden "
"layer in the Transformer encoder.     The length of the list is "
"`num_hidden_layers`.     Each Tensor has a data type of float32 and a "
"shape of [batch_size, sequence_length, hidden_size]."
msgstr ""

#: of
#: paddlenlp.transformers.roberta.modeling.RobertaForQuestionAnswering.forward:12
msgid ""
"Returns tuple (`start_logits`, `end_logits`) by default if "
"output_hidden_states is `False`. Returns tuple (`start_logits`, "
"`end_logits`, `encoder_outputs`) if output_hidden_states is set to "
"`True`."
msgstr ""

#: of
#: paddlenlp.transformers.roberta.modeling.RobertaForQuestionAnswering.forward:19
msgid "`start_logits` (Tensor):"
msgstr ""

#: of
#: paddlenlp.transformers.roberta.modeling.RobertaForQuestionAnswering.forward:18
msgid ""
"A tensor of the input token classification logits, indicates the start "
"position of the labelled span. Its data type should be float32 and its "
"shape is [batch_size, sequence_length]."
msgstr ""

#: of
#: paddlenlp.transformers.roberta.modeling.RobertaForQuestionAnswering.forward:23
msgid "`end_logits` (Tensor):"
msgstr ""

#: of
#: paddlenlp.transformers.roberta.modeling.RobertaForQuestionAnswering.forward:22
msgid ""
"A tensor of the input token classification logits, indicates the end "
"position of the labelled span. Its data type should be float32 and its "
"shape is [batch_size, sequence_length]."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaForMaskedLM:1
msgid "Roberta Model with a `masked language modeling` head on top."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaForCausalLM:3
#: paddlenlp.transformers.roberta.modeling.RobertaForMaskedLM:3
msgid "class:RobertaModel`): An instance of :class:`RobertaModel`."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaForCausalLM.forward:12
#: paddlenlp.transformers.roberta.modeling.RobertaForMaskedLM.forward:12
msgid ""
"Returns tensor `prediction_scores` by default. Returns tuple "
"(`prediction_scores`, `encoder_outputs`) if output_hidden_states is set "
"to `True`.  With the fields:  - `prediction_scores` (Tensor):     The "
"scores of masked token prediction.     Its data type should be float32 "
"and shape is [batch_size, sequence_length, vocab_size].  - "
"`encoder_outputs` (List(Tensor)):     A list of Tensor containing hidden-"
"states of the model at each hidden layer in the Transformer encoder.     "
"The length of the list is `num_hidden_layers`.     Each Tensor has a data"
" type of float32 and a shape of [batch_size, sequence_length, "
"hidden_size]."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaForCausalLM.forward:12
#: paddlenlp.transformers.roberta.modeling.RobertaForMaskedLM.forward:12
msgid ""
"Returns tensor `prediction_scores` by default. Returns tuple "
"(`prediction_scores`, `encoder_outputs`) if output_hidden_states is set "
"to `True`."
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaForCausalLM.forward:19
#: paddlenlp.transformers.roberta.modeling.RobertaForMaskedLM.forward:19
msgid "`prediction_scores` (Tensor):"
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaForCausalLM.forward:18
#: paddlenlp.transformers.roberta.modeling.RobertaForMaskedLM.forward:18
msgid ""
"The scores of masked token prediction. Its data type should be float32 "
"and shape is [batch_size, sequence_length, vocab_size]."
msgstr ""

#: of
#: paddlenlp.transformers.roberta.modeling.RobertaForMultipleChoice.forward:1
msgid ""
"Defines the computation performed at every call. Should be overridden by "
"all subclasses."
msgstr ""

#: of
#: paddlenlp.transformers.roberta.modeling.RobertaForMultipleChoice.forward:4
msgid "unpacked tuple arguments"
msgstr ""

#: of
#: paddlenlp.transformers.roberta.modeling.RobertaForMultipleChoice.forward:6
msgid "unpacked dict arguments"
msgstr ""

#: of paddlenlp.transformers.roberta.modeling.RobertaForCausalLM:1
msgid "Roberta Model with a `Causal language modeling` head on top."
msgstr ""

#~ msgid ""
#~ "Returns tuple (`sequence_output`, `pooled_output`)."
#~ "  With the fields:  - sequence_output "
#~ "(Tensor):     Sequence of hidden-states "
#~ "at the last layer of the model."
#~ "     It's data type should be float32"
#~ " and its shape is [batch_size, "
#~ "sequence_length, hidden_size].  - pooled_output "
#~ "(Tensor):     The output of first token"
#~ " (`[CLS]`) in sequence.     We \"pool\" "
#~ "the model by simply taking the "
#~ "hidden state corresponding to the first"
#~ " token.     Its data type should be"
#~ " float32 and its shape is "
#~ "[batch_size, hidden_size]."
#~ msgstr ""

#~ msgid "Returns tuple (`sequence_output`, `pooled_output`)."
#~ msgstr ""

#~ msgid "sequence_output (Tensor):"
#~ msgstr ""

#~ msgid "pooled_output (Tensor):"
#~ msgstr ""

#~ msgid ""
#~ "Returns tensor `logits`, a tensor of "
#~ "the input text classification logits. "
#~ "Its data type should be float32 "
#~ "and it has a shape of [batch_size,"
#~ " num_classes]."
#~ msgstr ""

#~ msgid ""
#~ "Returns tensor `logits`, a tensor of "
#~ "the input token classification logits. "
#~ "Shape as `[batch_size, sequence_length, "
#~ "num_classes]` and dtype as `float32`."
#~ msgstr ""

#~ msgid ""
#~ "Returns tuple (`start_logits`, `end_logits`).  "
#~ "With the fields:  - `start_logits` "
#~ "(Tensor):     A tensor of the input "
#~ "token classification logits, indicates the "
#~ "start position of the labelled span."
#~ "     Its data type should be float32"
#~ " and its shape is [batch_size, "
#~ "sequence_length].  - `end_logits` (Tensor):     "
#~ "A tensor of the input token "
#~ "classification logits, indicates the end "
#~ "position of the labelled span.     Its"
#~ " data type should be float32 and "
#~ "its shape is [batch_size, sequence_length]."
#~ msgstr ""

#~ msgid "Returns tuple (`start_logits`, `end_logits`)."
#~ msgstr ""

#~ msgid ""
#~ "Returns tensor `prediction_scores`, The scores"
#~ " of masked token prediction. Its data"
#~ " type should be float32 and shape "
#~ "is [batch_size, sequence_length, vocab_size]."
#~ msgstr ""

