# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Luke model configuration"""
from __future__ import annotations

from paddlenlp.transformers.configuration_utils import PretrainedConfig

__all__ = [
    "LUKE_PRETRAINED_INIT_CONFIGURATION",
    "LUKE_PRETRAINED_RESOURCE_FILES_MAP",
    "LukeConfig",
]

LUKE_PRETRAINED_INIT_CONFIGURATION = {
    "luke-base": {
        "attention_probs_dropout_prob": 0.1,
        "hidden_act": "gelu",
        "pad_token_id": 1,
        "hidden_dropout_prob": 0.1,
        "hidden_size": 768,
        "initializer_range": 0.02,
        "intermediate_size": 3072,
        "max_position_embeddings": 514,
        "num_attention_heads": 12,
        "num_hidden_layers": 12,
        "type_vocab_size": 1,
        "vocab_size": 50267,
    },
    "luke-large": {
        "attention_probs_dropout_prob": 0.1,
        "hidden_act": "gelu",
        "pad_token_id": 1,
        "hidden_dropout_prob": 0.1,
        "hidden_size": 1024,
        "initializer_range": 0.02,
        "intermediate_size": 4096,
        "max_position_embeddings": 514,
        "num_attention_heads": 16,
        "num_hidden_layers": 24,
        "type_vocab_size": 1,
        "vocab_size": 50267,
    },
}

LUKE_PRETRAINED_RESOURCE_FILES_MAP = {
    "model_state": {
        "luke-base": "https://bj.bcebos.com/paddlenlp/models/transformers/luke/luke-base/model_state.pdparams",
        "luke-large": "https://bj.bcebos.com/paddlenlp/models/transformers/luke/luke-large/model_state.pdparams",
    }
}
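
# A minimal sketch (illustrative only) of how these presets can be applied: the
# dict keys mirror the `LukeConfig.__init__` arguments below, so a preset can be
# expanded directly into the constructor. How PaddleNLP wires these maps into
# `from_pretrained` name resolution is not shown in this file.
#
#     config = LukeConfig(**LUKE_PRETRAINED_INIT_CONFIGURATION["luke-base"])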


class LukeConfig(PretrainedConfig):
    r"""
    Args:
       vocab_size (int, optional):
           Vocabulary size of `inputs_ids` in `LukeModel`. Also is the vocab size of token embedding matrix.
           Defines the number of different tokens that can be represented by the `inputs_ids` passed when
           calling `LukeModel`. Defaults to 50267.
       hidden_size (int, optional):
           Dimensionality of the embedding layer, encoder layer and pooler layer. Defaults to `768`.
       num_hidden_layers (int, optional):
           Number of hidden layers in the Transformer encoder. Defaults to `12`.
       num_attention_heads (int, optional):
           Number of attention heads for each attention layer in the Transformer encoder.
           Defaults to `12`.
       intermediate_size (int, optional):
           Dimensionality of the feed-forward (FF) layer in the encoder. Input tensors
           to FF layers are first projected from `hidden_size` to `intermediate_size`,
           then projected back to `hidden_size`. Typically `intermediate_size` is larger than `hidden_size`.
           Defaults to `3072`.
       hidden_act (str, optional):
           The non-linear activation function in the feed-forward layer.
           ``"gelu"``, ``"relu"``, and any other activation function supported by Paddle
           are accepted. Defaults to `"gelu"`.
       hidden_dropout_prob (float, optional):
           The dropout probability for all fully connected layers in the embeddings and encoder.
           Defaults to `0.1`.
       attention_probs_dropout_prob (float, optional):
           The dropout probability applied to the attention weights in the MultiHeadAttention
           of every encoder layer. Defaults to `0.1`.
       max_position_embeddings (int, optional):
           The maximum sequence length that the position embeddings can represent, which
           bounds the supported length of an input sequence. Defaults to `514`.
       type_vocab_size (int, optional):
           The vocabulary size of `token_type_ids`.
           Defaults to `1`.
       entity_vocab_size (int, optional):
           Vocabulary size of `entity_ids` in `LukeModel`. Also the vocabulary size of the entity
           embedding matrix. Defines the number of different entities that can be represented by the
           `entity_ids` passed when calling `LukeModel`. Defaults to `500000`.
       entity_emb_size (int, optional):
           Dimensionality of the entity embedding layer. Defaults to `256`.
       initializer_range (float, optional):
           The standard deviation of the normal initializer.
           Defaults to `0.02`.

           .. note::
               A normal_initializer initializes weight matrices as normal distributions.
               See :meth:`LukePretrainedModel.init_weights()` for how weights are initialized in `LukeModel`.

       pad_token_id (int, optional):
           The index of the padding token in the token vocabulary.
           Defaults to `1`.
       entity_pad_token_id (int, optional):
           The index of the padding entity in the entity vocabulary.
           Defaults to `0`.
       cls_token_id (int, optional):
           The index of the classification (`[CLS]`) token in the token vocabulary.
           Defaults to `101`.
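
    Examples:
        A minimal construction sketch (`LukeModel` importable from `paddlenlp.transformers`
        and config-based instantiation are assumed here; weights are randomly initialized,
        not pretrained):

        .. code-block:: python

            from paddlenlp.transformers import LukeConfig, LukeModel

            # Build a configuration matching the luke-base defaults.
            configuration = LukeConfig()

            # Instantiate a randomly initialized model from that configuration.
            model = LukeModel(configuration)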
    """
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        entity_vocab_size=500000,
        entity_emb_size=256,
        initializer_range=0.02,
        pad_token_id=1,
        entity_pad_token_id=0,
        cls_token_id=101,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.entity_emb_size = entity_emb_size
        self.initializer_range = initializer_range
        self.pad_token_id = pad_token_id
        self.entity_pad_token_id = entity_pad_token_id
        self.cls_token_id = cls_token_id
