# SOME DESCRIPTIVE TITLE.
# Copyright (C) 2021, PaddleNLP
# This file is distributed under the same license as the PaddleNLP package.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2022.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: PaddleNLP \n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2022-03-18 21:31+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.9.0\n"

#: ../source/paddlenlp.transformers.xlnet.modeling.rst:2
msgid "modeling"
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling:1
msgid "Modeling classes for XLNet model."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetPretrainedModel:1
msgid "基类：:class:`paddlenlp.transformers.model_utils.PretrainedModel`"
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetPretrainedModel:1
msgid ""
"An abstract class for pretrained XLNet models. It provides XLNet related "
"`model_config_file`, `resource_files_names`, "
"`pretrained_resource_files_map`, `pretrained_init_configuration`, "
"`base_model_prefix` for downloading and loading pretrained models. See "
":class:`~paddlenlp.transformers.model_utils.PretrainedModel` for more "
"details."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice:1
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering:1
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification:1
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification:1
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel:1
#: paddlenlp.transformers.xlnet.modeling.XLNetModel:1
msgid "基类：:class:`paddlenlp.transformers.xlnet.modeling.XLNetPretrainedModel`"
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel:1
msgid "The bare XLNet Model outputting raw hidden-states."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel:3
msgid ""
"This model inherits from "
":class:`~paddlenlp.transformers.model_utils.PretrainedModel`. Refer to "
"the superclass documentation for the generic methods."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel:6
msgid ""
"This model is also a `paddle.nn.Layer "
"<https://www.paddlepaddle.org.cn/documentation "
"/docs/en/api/paddle/fluid/dygraph/layers/Layer_en.html>`__ subclass. Use "
"it as a regular Paddle Layer and refer to the Paddle documentation for "
"all matter related to general usage and behavior."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice
#: paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward
#: paddlenlp.transformers.xlnet.modeling.XLNetModel
#: paddlenlp.transformers.xlnet.modeling.XLNetModel.forward
msgid "参数"
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel:10
msgid ""
"Vocabulary size of `inputs_ids` in `XLNetModel`. Also is the vocab size "
"of token embedding matrix."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel:13
msgid ""
"The number of tokens to cache. If not 0 or None, the last `mem_len` "
"hidden states in each layer will be cached into memory. Defaults to "
"`None`."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel:16
msgid ""
"The number of tokens in the current batch to be cached. If positive, then"
" at most `reuse_len` tokens can be cached in the current batch. "
"Otherwise, there is no limit to the number of tokens. Defaults to `None`."
"  .. note::     The difference between `mem_len` and `reuse_len` is that "
"`mem_len` defines     **the total number** of tokens to cache while "
"`reuse_len` defines the number of tokens     in **the current batch** to "
"be cached."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel:16
msgid ""
"The number of tokens in the current batch to be cached. If positive, then"
" at most `reuse_len` tokens can be cached in the current batch. "
"Otherwise, there is no limit to the number of tokens. Defaults to `None`."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel:21
msgid ""
"The difference between `mem_len` and `reuse_len` is that `mem_len` "
"defines **the total number** of tokens to cache while `reuse_len` defines"
" the number of tokens in **the current batch** to be cached."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel:25
msgid ""
"Dimensionality of the embedding layers, encoder layers and pooler layer. "
"Defaults to 768."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel:28
msgid ""
"Whether or not to use the same attention length for each token. Defaults "
"to `False`."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel:31
msgid ""
"The attention type used in the attention layer. Set **\"bi\"** for "
"``XLNet``, **\"uni\"** for ``Transformer-XL``. Defaults to **\"bi\"**."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel:34
msgid ""
"Whether or not to use bidirectional input pipeline. Set to `True` during "
"pretraining and `False` during fine-tuning. Defaults to `False`."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel:37
msgid ""
"Maximum relative distance supported. All relative distances larger than "
"`clamp_len` will be clamped. Setting this attribute to -1 means no "
"clamping. Defaults to -1."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel:40
msgid "The number of hidden layers in the encoder. Defaults to 12."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel:42
msgid ""
"The dropout ratio for all fully connected layers in the embeddings and "
"encoder. Defaults to 0.1."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel:45
msgid ""
"The dropout ratio for all fully connected layers in the pooler "
"(classification head). Defaults to 0.1."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel:48
msgid "Number of attention heads in each attention layer. Defaults to 12."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel:51
msgid ""
"Dimensionality of each attention head. Defaults to 64.  .. note::     "
"`d_head` should be equal to `d_model` divided by `n_head`."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel:51
msgid "Dimensionality of each attention head. Defaults to 64."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel:54
msgid "`d_head` should be equal to `d_model` divided by `n_head`."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel:56
msgid ""
"The `epsilon` parameter used in :class:`paddle.nn.LayerNorm` for "
"initializing layer normalization layers. Defaults to 1e-12."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel:59
msgid ""
"Dimensionality of the feed-forward (ff) layer in the encoder. Input "
"tensors to ff layers are firstly projected from `d_model` to `d_inner`, "
"and then projected back to `d_model`. Typically `d_inner` is larger than "
"`d_model`. Defaults to 3072."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel:64
msgid ""
"The non-linear activation function in the feed-forward layers in the "
"encoder. Choose from the following supported activation functions: "
"`[\"relu\", \"gelu\", \"tanh\", \"sigmoid\", \"mish\", \"swish\"]`. "
"Defaults to `\"gelu\"`."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel:68
msgid ""
"The standard deviation of the normal initializer. Defaults to 0.02.  .. "
"note::     A normal_initializer initializes weight matrices as normal "
"distributions.     See :meth:`XLNetPretrainedModel._init_weights()` for "
"how weights are initialized in `XLNetModel`."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel:68
msgid "The standard deviation of the normal initializer. Defaults to 0.02."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel:71
msgid ""
"A normal_initializer initializes weight matrices as normal distributions."
" See :meth:`XLNetPretrainedModel._init_weights()` for how weights are "
"initialized in `XLNetModel`."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:1
msgid "The XLNetModel forward method, overrides the `__call__()` special method."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:3
msgid ""
"Indices of input sequence tokens in the vocabulary. They are numerical "
"representations of tokens that build the input sequence. It's data type "
"should be `int64` and has a shape of [batch_size, sequence_length]."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:7
msgid ""
"Segment token indices to indicate first and second portions of the "
"inputs. Indices can be either 0 or 1:  - 0 corresponds to a **sentence "
"A** token, - 1 corresponds to a **sentence B** token.  It's data type "
"should be `int64` and has a shape of [batch_size, sequence_length]. "
"Defaults to None, which means no segment embeddings is added to token "
"embeddings."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:7
msgid ""
"Segment token indices to indicate first and second portions of the "
"inputs. Indices can be either 0 or 1:"
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:10
msgid "0 corresponds to a **sentence A** token,"
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:11
msgid "1 corresponds to a **sentence B** token."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:13
msgid ""
"It's data type should be `int64` and has a shape of [batch_size, "
"sequence_length]. Defaults to None, which means no segment embeddings is "
"added to token embeddings."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:16
msgid ""
"Mask to indicate whether to perform attention on each input token or not."
" The values should be either 0 or 1. The attention scores will be set to "
"**-infinity** for any positions in the mask that are **0**, and will be "
"**unchanged** for positions that are **1**.  - **1** for tokens that are "
"**not masked**, - **0** for tokens that are **masked**.  It's data type "
"should be `float32` and has a shape of [batch_size, sequence_length]. "
"Defaults to `None`."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:16
msgid ""
"Mask to indicate whether to perform attention on each input token or not."
" The values should be either 0 or 1. The attention scores will be set to "
"**-infinity** for any positions in the mask that are **0**, and will be "
"**unchanged** for positions that are **1**."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:21
msgid "**1** for tokens that are **not masked**,"
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:22
msgid "**0** for tokens that are **masked**."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:24
msgid ""
"It's data type should be `float32` and has a shape of [batch_size, "
"sequence_length]. Defaults to `None`."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:27
msgid ""
"A list of length `n_layers` with each Tensor being a pre-computed hidden-"
"state for each layer. Each Tensor has a dtype `float32` and a shape of "
"[batch_size, sequence_length, hidden_size]. Defaults to None, and we "
"don't use mems.  .. note::     `use_mems` has to be set to `True` in "
"order to make use of `mems`."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:27
msgid ""
"A list of length `n_layers` with each Tensor being a pre-computed hidden-"
"state for each layer. Each Tensor has a dtype `float32` and a shape of "
"[batch_size, sequence_length, hidden_size]. Defaults to None, and we "
"don't use mems."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:32
msgid "`use_mems` has to be set to `True` in order to make use of `mems`."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:34
msgid ""
"Mask to indicate the permutation pattern of the input sequence with "
"values being either 0 or 1.  - if ``perm_mask[k, i, j] = 0``, i "
"**attend** to j in batch k; - if ``perm_mask[k, i, j] = 1``, i **does not"
" attend** to j in batch k.  Only used during pretraining (to define "
"factorization order) or for sequential decoding (generation). It's data "
"type should be `float32` and has a shape of [batch_size, sequence_length,"
" sequence_length]. Defaults to `None`, then each token attends to all the"
" other tokens (full bidirectional attention)."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:34
msgid ""
"Mask to indicate the permutation pattern of the input sequence with "
"values being either 0 or 1."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:36
msgid "if ``perm_mask[k, i, j] = 0``, i **attend** to j in batch k;"
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:37
msgid "if ``perm_mask[k, i, j] = 1``, i **does not attend** to j in batch k."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:39
msgid ""
"Only used during pretraining (to define factorization order) or for "
"sequential decoding (generation). It's data type should be `float32` and "
"has a shape of [batch_size, sequence_length, sequence_length]. Defaults "
"to `None`, then each token attends to all the other tokens (full "
"bidirectional attention)."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:44
msgid ""
"Mask to indicate the output tokens to use with values being either 0 or "
"1. If ``target_mapping[k, i, j] = 1``, the i-th predict in batch k is on "
"the j-th token. It's data type should be `float32` and has a shape of "
"[batch_size, num_predict, sequence_length]. Only used during pretraining "
"for partial prediction or for sequential decoding (generation). Defaults "
"to `None`."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:50
msgid ""
"Mask to avoid performing attention on padding token with values being "
"either 0 or 1. It's data type should be `float32` and it has a shape of "
"[batch_size, sequence_length]. This mask is negative of `attention_mask`:"
"  - 1 for tokens that are **masked**, - 0 for tokens that are **not "
"masked**.  You should use only one of `input_mask` and `attention_mask`. "
"Defaults to `None`."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:50
msgid ""
"Mask to avoid performing attention on padding token with values being "
"either 0 or 1. It's data type should be `float32` and it has a shape of "
"[batch_size, sequence_length]. This mask is negative of `attention_mask`:"
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:54
msgid "1 for tokens that are **masked**,"
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:55
msgid "0 for tokens that are **not masked**."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:57
msgid ""
"You should use only one of `input_mask` and `attention_mask`. Defaults to"
" `None`."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:59
msgid ""
"Mask to nullify selected heads of the self-attention layers with values "
"being either 0 or 1.  - 1 indicates the head is **not masked**, - 0 "
"indicates the head is **masked**.  It's data type should be `float32` and"
" has a shape of [num_heads] or [num_layers, num_heads]. Defaults to "
"`None`, which means we keep all heads."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:59
msgid ""
"Mask to nullify selected heads of the self-attention layers with values "
"being either 0 or 1."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:61
msgid "1 indicates the head is **not masked**,"
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:62
msgid "0 indicates the head is **masked**."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:64
msgid ""
"It's data type should be `float32` and has a shape of [num_heads] or "
"[num_layers, num_heads]. Defaults to `None`, which means we keep all "
"heads."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:67
msgid ""
"An embedded representation tensor which is an alternative of `input_ids`."
" You should specify only either one of them to avoid contradiction. It's "
"data type should be `float32` and has a shape of [batch_size, "
"sequence_length, hidden_size]. Defaults to `None`, which means we only "
"specify `input_ids`."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:72
msgid ""
"Whether or not to use recurrent memory mechanism during training. "
"Defaults to `False` and we don't use recurrent memory mechanism in "
"training mode."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:75
msgid ""
"Whether or not to use recurrent memory mechanism during evaluation. "
"Defaults to `False` and we don't use recurrent memory mechanism in "
"evaluation mode."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:78
msgid ""
"Whether or not to return additional information other than the output "
"tensor. If True, then returns information about `output`, `new_mems`, "
"`hidden_states` and `attentions` which will also be formatted as a dict. "
"Else only returns the output tensor. Defaults to False."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward
#: paddlenlp.transformers.xlnet.modeling.XLNetModel.forward
msgid "返回"
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:84
msgid ""
"Returns tensor `output` or a dict with key-value pairs: "
"{\"last_hidden_state\": `output`, \"mems\": `mems`, \"hidden_states\": "
"`hidden_states`, \"attentions\": `attentions`}.  With the corresponding "
"fields:  - `output` (Tensor):     Output of the final layer of the model."
"     It's a Tensor of dtype `float32` and has a shape of [batch_size, "
"num_predict, hidden_size].      .. note::         `num_predict` "
"corresponds to `target_mapping.shape[1]`.         If `target_mapping` is "
"`None`, then `num_predict` equals to `sequence_length`. - `mems` "
"(List[Tensor]):     A list of pre-computed hidden-states. The length of "
"the list is `n_layers`.     Each element in the list is a Tensor with "
"dtype `float32` and has a shape of     [batch_size, sequence_length, "
"hidden_size]. - `hidden_states` (List[Tensor], optional):     A list of "
"Tensor containing hidden-states of the model at the output of each layer"
"     plus the initial embedding outputs. Each Tensor has a data type of "
"`float32` and     has a shape of [batch_size, sequence_length, "
"hidden_size].     Being returned when `output_hidden_states` is set to "
"`True`. - `attentions` (List[Tensor], optional):     A list of Tensor "
"containing attentions weights of each hidden layer.     Each Tensor (one "
"for each layer) has a data type of `float32` and     has a shape of "
"[batch_size, num_heads, sequence_length, sequence_length].     Being "
"returned when `output_attentions` is set to `True`."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:84
msgid ""
"Returns tensor `output` or a dict with key-value pairs: "
"{\"last_hidden_state\": `output`, \"mems\": `mems`, \"hidden_states\": "
"`hidden_states`, \"attentions\": `attentions`}."
msgstr ""

#: of
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward:32
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:34
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:34
#: paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:88
msgid "With the corresponding fields:"
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:95
msgid "`output` (Tensor):"
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:91
msgid ""
"Output of the final layer of the model. It's a Tensor of dtype `float32` "
"and has a shape of [batch_size, num_predict, hidden_size]."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:95
msgid ""
"`num_predict` corresponds to `target_mapping.shape[1]`. If "
"`target_mapping` is `None`, then `num_predict` equals to "
"`sequence_length`."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward:38
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:41
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward:37
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:39
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:39
#: paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:99
msgid "`mems` (List[Tensor]):"
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:98
msgid ""
"A list of pre-computed hidden-states. The length of the list is "
"`n_layers`. Each element in the list is a Tensor with dtype `float32` and"
" has a shape of [batch_size, sequence_length, hidden_size]."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward:40
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:43
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward:39
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:41
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:41
#: paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:104
msgid "`hidden_states` (List[Tensor], optional):"
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:102
msgid ""
"A list of Tensor containing hidden-states of the model at the output of "
"each layer plus the initial embedding outputs. Each Tensor has a data "
"type of `float32` and has a shape of [batch_size, sequence_length, "
"hidden_size]. Being returned when `output_hidden_states` is set to "
"`True`."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward:42
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:45
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward:41
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:43
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:43
#: paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:109
msgid "`attentions` (List[Tensor], optional):"
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:107
msgid ""
"A list of Tensor containing attentions weights of each hidden layer. Each"
" Tensor (one for each layer) has a data type of `float32` and has a shape"
" of [batch_size, num_heads, sequence_length, sequence_length]. Being "
"returned when `output_attentions` is set to `True`."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward
#: paddlenlp.transformers.xlnet.modeling.XLNetModel.forward
msgid "返回类型"
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward:47
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:50
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward:46
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:48
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:48
#: paddlenlp.transformers.xlnet.modeling.XLNetModel.forward:114
msgid "示例"
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification:1
msgid ""
"XLNet Model with a linear layer on top of the output layer, designed for "
"sequence classification/regression tasks like GLUE tasks."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice:4
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering:4
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification:4
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification:4
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel:3
msgid "An instance of :class:`XLNetModel`."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification:6
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification:6
msgid "The number of classes. Defaults to 2."
msgstr ""

#: of
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward:1
msgid ""
"The XLNetForSequenceClassification forward method, overrides the "
"`__call__()` special method."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward:3
#: paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward:5
#: paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward:7
#: paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward:9
#: paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward:11
#: paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward:13
#: paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward:15
#: paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward:17
#: paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward:19
#: paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward:21
#: paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward:23
#: paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward:25
#: paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward:39
#: paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward:41
#: paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward:43
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:3
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:5
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:7
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:9
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:11
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:13
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:15
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:17
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:19
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:21
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:23
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:25
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:42
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:44
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:46
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward:3
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward:5
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward:7
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward:9
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward:11
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward:13
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward:15
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward:17
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward:19
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward:21
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward:23
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward:25
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward:38
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward:40
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward:42
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:3
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:5
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:7
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:9
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:11
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:13
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:15
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:17
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:19
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:21
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:23
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:25
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:40
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:42
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:44
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:3
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:5
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:7
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:9
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:11
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:13
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:15
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:17
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:19
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:21
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:23
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:25
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:40
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:42
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:44
msgid "See :class:`XLNetModel`."
msgstr ""

#: of
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward:28
msgid ""
"Returns tensor `logits` or a dict with key-value pairs: {\"logits\": "
"`logits`, \"mems\": `mems`, \"hidden_states\": `hidden_states`, "
"\"attentions\": `attentions`}.  With the corresponding fields:  - "
"`logits` (Tensor):     Classification scores before SoftMax (also called "
"logits). It's data type should be `float32`     and has a shape of "
"[batch_size, num_classes]. - `mems` (List[Tensor]):     See "
":class:`XLNetModel`. - `hidden_states` (List[Tensor], optional):     See "
":class:`XLNetModel`. - `attentions` (List[Tensor], optional):     See "
":class:`XLNetModel`."
msgstr ""

#: of
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward:28
msgid ""
"Returns tensor `logits` or a dict with key-value pairs: {\"logits\": "
"`logits`, \"mems\": `mems`, \"hidden_states\": `hidden_states`, "
"\"attentions\": `attentions`}."
msgstr ""

#: of
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward:35
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:37
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:37
msgid "`logits` (Tensor):"
msgstr ""

#: of
#: paddlenlp.transformers.xlnet.modeling.XLNetForSequenceClassification.forward:35
msgid ""
"Classification scores before SoftMax (also called logits). It's data type"
" should be `float32` and has a shape of [batch_size, num_classes]."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification:1
msgid ""
"XLNet Model with a linear layer on top of the hidden-states output layer,"
" designed for token classification tasks like NER tasks."
msgstr ""

#: of
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:1
msgid ""
"The XLNetForTokenClassification forward method, overrides the "
"`__call__()` special method."
msgstr ""

#: of
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:28
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:28
msgid ""
"Returns tensor `logits` or a dict with key-value pairs:  {\"logits\": "
"`logits`, \"mems\": `mems`, \"hidden_states\": `hidden_states`, "
"\"attentions\": `attentions`}.  With the corresponding fields:  - "
"`logits` (Tensor):     Classification scores before SoftMax (also called "
"logits). It's data type should be `float32`     and has a shape of "
"[batch_size, sequence_length, num_classes]. - `mems` (List[Tensor]):     "
"See :class:`XLNetModel`. - `hidden_states` (List[Tensor], optional):     "
"See :class:`XLNetModel`. - `attentions` (List[Tensor], optional):     See"
" :class:`XLNetModel`."
msgstr ""

#: of
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:30
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:30
msgid "Returns tensor `logits` or a dict with key-value pairs:"
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward:31
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:31
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:31
msgid "{\"logits\": `logits`, \"mems\": `mems`,"
msgstr ""

#: of
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:32
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:32
msgid "\"hidden_states\": `hidden_states`, \"attentions\": `attentions`}."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward:36
#: paddlenlp.transformers.xlnet.modeling.XLNetForTokenClassification.forward:37
#: paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:37
msgid ""
"Classification scores before SoftMax (also called logits). It's data type"
" should be `float32` and has a shape of [batch_size, sequence_length, "
"num_classes]."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel:1
msgid ""
"XLNet Model with a language modeling head on top (linear layer with "
"weights tied to the input embeddings)."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetLMHeadModel.forward:1
msgid ""
"The XLNetLMHeadModel forward method, overrides the `__call__()` special "
"method."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice:1
msgid ""
"XLNet Model with a multiple choice classification head on top (a linear "
"layer on top of the pooled output and a softmax) e.g. for RACE/SWAG "
"tasks."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward:1
msgid ""
"The XLNetForMultipleChoice forward method, overrides the `__call__()` "
"special method."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward:28
msgid ""
"Returns tensor `logits` or a dict with key-value pairs:  {\"logits\": "
"`logits`, \"mems\": `mems`, \"hidden_states\": `hidden_states`, "
"\"attentions\": `attentions`}  With the corresponding fields: - `logits` "
"(Tensor):     Classification scores before SoftMax (also called logits). "
"It's data type should be `float32`     and has a shape of [batch_size, "
"sequence_length, num_classes]. - `mems` (List[Tensor]):     See "
":class:`XLNetModel`. - `hidden_states` (List[Tensor], optional):     See "
":class:`XLNetModel`. - `attentions` (List[Tensor], optional):     See "
":class:`XLNetModel`."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward:30
msgid "Returns tensor `logits` or a dict with key-value pairs:"
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward:32
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:32
msgid "\"hidden_states\": `hidden_states`, \"attentions\": `attentions`}"
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetForMultipleChoice.forward:34
msgid "With the corresponding fields: - `logits` (Tensor):"
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering:1
msgid ""
"XLNet Model with a span classification head on top for extractive "
"question-answering tasks like SQuAD (a linear layers on top of the "
"hidden-states output to compute `span start logits` and `span end "
"logits`)."
msgstr ""

#: of paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:1
msgid ""
"The XLNetForQuestionAnswering forward method, overrides the `__call__()` "
"special method."
msgstr ""

#: of
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:28
msgid ""
"Returns tensor (`start_logits`, `end_logits`) or a dict with key-value "
"pairs:  {\"start_logits\": `start_logits`, \"end_logits\": `end_logits`, "
"\"mems\": `mems`, \"hidden_states\": `hidden_states`, \"attentions\": "
"`attentions`}  With the corresponding fields: - `start_logits` (Tensor):"
"     A tensor of the input token classification logits, indicates the "
"start position of the labelled span.     Its data type should be float32 "
"and its shape is [batch_size, sequence_length]. - `end_logits` (Tensor):"
"     A tensor of the input token classification logits, indicates the end"
" position of the labelled span.     Its data type should be float32 and "
"its shape is [batch_size, sequence_length]. - `mems` (List[Tensor]):     "
"See :class:`XLNetModel`. - `hidden_states` (List[Tensor], optional):     "
"See :class:`XLNetModel`. - `attentions` (List[Tensor], optional):     See"
" :class:`XLNetModel`."
msgstr ""

#: of
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:30
msgid ""
"Returns tensor (`start_logits`, `end_logits`) or a dict with key-value "
"pairs:"
msgstr ""

#: of
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:31
msgid ""
"{\"start_logits\": `start_logits`, \"end_logits\": `end_logits`, "
"\"mems\": `mems`,"
msgstr ""

#: of
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:34
msgid "With the corresponding fields: - `start_logits` (Tensor):"
msgstr ""

#: of
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:36
msgid ""
"A tensor of the input token classification logits, indicates the start "
"position of the labelled span. Its data type should be float32 and its "
"shape is [batch_size, sequence_length]."
msgstr ""

#: of
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:39
msgid "`end_logits` (Tensor):"
msgstr ""

#: of
#: paddlenlp.transformers.xlnet.modeling.XLNetForQuestionAnswering.forward:39
msgid ""
"A tensor of the input token classification logits, indicates the end "
"position of the labelled span. Its data type should be float32 and its "
"shape is [batch_size, sequence_length]."
msgstr ""

