# coding=utf-8
# Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team.
# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MindSpore Longformer model."""

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import mindspore
from mindspore import ops, nn, Parameter, Tensor
from mindspore.common.initializer import initializer, Normal

from mindnlp.utils import (
    ModelOutput,
    logging,
)
from ...activations import ACT2FN, gelu
from ...modeling_utils import PreTrainedModel
from ...ms_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from .configuration_longformer import LongformerConfig


logger = logging.get_logger(__name__)

# Default checkpoint / config names referenced by auto-generated docstrings.
_CHECKPOINT_FOR_DOC = "allenai/longformer-base-4096"
_CONFIG_FOR_DOC = "LongformerConfig"

# Hub ids of the official pretrained Longformer checkpoints known to this port.
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "allenai/longformer-base-4096",
    "allenai/longformer-large-4096",
    "allenai/longformer-large-4096-finetuned-triviaqa",
    "allenai/longformer-base-4096-extra.pos.embd.only",
    "allenai/longformer-large-4096-extra.pos.embd.only",
    # See all Longformer models at https://hf-mirror.com/models?filter=longformer
]

def scalar_div(input, other, *, rounding_mode="trunc"):
    """
    Divide two Python scalars, mimicking `torch.div`'s `rounding_mode`
    semantics, since `ops.div` does not support scalar operands.

    Args:
        input: Dividend (int or float).
        other: Divisor (int or float); must be non-zero.
        rounding_mode: `"trunc"` rounds the quotient toward zero, `"floor"`
            rounds toward negative infinity, any other value returns true
            division.

    Returns:
        The (possibly rounded) quotient.
    """
    if rounding_mode == "trunc":
        quotient = input // other
        # `//` floors toward -inf, so a *negative, inexact* quotient must be
        # bumped by 1 to round toward zero instead. Exact quotients need no
        # correction (the previous unconditional `+ 1` broke e.g. -6 // 2,
        # returning -2 instead of -3).
        if quotient < 0 and quotient * other != input:
            quotient += 1
        return quotient
    if rounding_mode == "floor":
        return input // other
    return input / other

@dataclass
class LongformerBaseModelOutput(ModelOutput):
    """
    Base class for Longformer's outputs, with potential hidden states, local and global attentions.

    Args:
        last_hidden_state (`mindspore.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(mindspore.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            One tensor for the embedding output plus one per layer, each of shape
            `(batch_size, sequence_length, hidden_size)`.
        attentions (`tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Local attention weights after the softmax, one tensor per layer of shape
            `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the
            number of tokens with global attention. The first `x` values are weights to the
            globally-attended tokens (fixed positions); the remaining `attention_window + 1` values
            are weights to tokens at relative positions within the window, with a token's attention
            to itself at index `x + attention_window / 2`. Weights involving globally-attending
            tokens are zeroed here and should be read from `global_attentions` instead.
        global_attentions (`tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Global attention weights after the softmax, one tensor per layer of shape
            `(batch_size, num_heads, sequence_length, x)`: attention from every token with global
            attention to every token in the sequence.
    """
    last_hidden_state: mindspore.Tensor
    hidden_states: Optional[Tuple[mindspore.Tensor]] = None
    attentions: Optional[Tuple[mindspore.Tensor]] = None
    global_attentions: Optional[Tuple[mindspore.Tensor]] = None


@dataclass
class LongformerBaseModelOutputWithPooling(ModelOutput):
    """
    Base class for Longformer's outputs that also contains a pooling of the last hidden states.

    Args:
        last_hidden_state (`mindspore.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`mindspore.Tensor` of shape `(batch_size, hidden_size)`):
            Last layer hidden-state of the first token of the sequence (classification token)
            further processed by a Linear layer and a Tanh activation function. The Linear layer
            weights are trained from the next sentence prediction (classification) objective
            during pretraining.
        hidden_states (`tuple(mindspore.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            One tensor for the embedding output plus one per layer, each of shape
            `(batch_size, sequence_length, hidden_size)`.
        attentions (`tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Local attention weights after the softmax, one tensor per layer of shape
            `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the
            number of tokens with global attention. The first `x` values are weights to the
            globally-attended tokens (fixed positions); the remaining `attention_window + 1` values
            are weights to tokens at relative positions within the window, with a token's attention
            to itself at index `x + attention_window / 2`. Weights involving globally-attending
            tokens are zeroed here and should be read from `global_attentions` instead.
        global_attentions (`tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Global attention weights after the softmax, one tensor per layer of shape
            `(batch_size, num_heads, sequence_length, x)`: attention from every token with global
            attention to every token in the sequence.
    """
    last_hidden_state: mindspore.Tensor
    pooler_output: Optional[mindspore.Tensor] = None
    hidden_states: Optional[Tuple[mindspore.Tensor]] = None
    attentions: Optional[Tuple[mindspore.Tensor]] = None
    global_attentions: Optional[Tuple[mindspore.Tensor]] = None


@dataclass
class LongformerMaskedLMOutput(ModelOutput):
    """
    Base class for masked language models outputs.

    Args:
        loss (`mindspore.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Masked language modeling (MLM) loss.
        logits (`mindspore.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token
            before SoftMax).
        hidden_states (`tuple(mindspore.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            One tensor for the embedding output plus one per layer, each of shape
            `(batch_size, sequence_length, hidden_size)`.
        attentions (`tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Local attention weights after the softmax, one tensor per layer of shape
            `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the
            number of tokens with global attention. The first `x` values are weights to the
            globally-attended tokens (fixed positions); the remaining `attention_window + 1` values
            are weights to tokens at relative positions within the window, with a token's attention
            to itself at index `x + attention_window / 2`. Weights involving globally-attending
            tokens are zeroed here and should be read from `global_attentions` instead.
        global_attentions (`tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Global attention weights after the softmax, one tensor per layer of shape
            `(batch_size, num_heads, sequence_length, x)`: attention from every token with global
            attention to every token in the sequence.
    """
    loss: Optional[mindspore.Tensor] = None
    logits: Optional[mindspore.Tensor] = None
    hidden_states: Optional[Tuple[mindspore.Tensor]] = None
    attentions: Optional[Tuple[mindspore.Tensor]] = None
    global_attentions: Optional[Tuple[mindspore.Tensor]] = None


@dataclass
class LongformerQuestionAnsweringModelOutput(ModelOutput):
    """
    Base class for outputs of question answering Longformer models.

    Args:
        loss (`mindspore.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
        start_logits (`mindspore.Tensor` of shape `(batch_size, sequence_length)`):
            Span-start scores (before SoftMax).
        end_logits (`mindspore.Tensor` of shape `(batch_size, sequence_length)`):
            Span-end scores (before SoftMax).
        hidden_states (`tuple(mindspore.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            One tensor for the embedding output plus one per layer, each of shape
            `(batch_size, sequence_length, hidden_size)`.
        attentions (`tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Local attention weights after the softmax, one tensor per layer of shape
            `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the
            number of tokens with global attention. The first `x` values are weights to the
            globally-attended tokens (fixed positions); the remaining `attention_window + 1` values
            are weights to tokens at relative positions within the window, with a token's attention
            to itself at index `x + attention_window / 2`. Weights involving globally-attending
            tokens are zeroed here and should be read from `global_attentions` instead.
        global_attentions (`tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Global attention weights after the softmax, one tensor per layer of shape
            `(batch_size, num_heads, sequence_length, x)`: attention from every token with global
            attention to every token in the sequence.
    """
    loss: Optional[mindspore.Tensor] = None
    start_logits: Optional[mindspore.Tensor] = None
    end_logits: Optional[mindspore.Tensor] = None
    hidden_states: Optional[Tuple[mindspore.Tensor]] = None
    attentions: Optional[Tuple[mindspore.Tensor]] = None
    global_attentions: Optional[Tuple[mindspore.Tensor]] = None


@dataclass
class LongformerSequenceClassifierOutput(ModelOutput):
    """
    Base class for outputs of sentence classification models.

    Args:
        loss (`mindspore.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (`mindspore.Tensor` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (`tuple(mindspore.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            One tensor for the embedding output plus one per layer, each of shape
            `(batch_size, sequence_length, hidden_size)`.
        attentions (`tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Local attention weights after the softmax, one tensor per layer of shape
            `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the
            number of tokens with global attention. The first `x` values are weights to the
            globally-attended tokens (fixed positions); the remaining `attention_window + 1` values
            are weights to tokens at relative positions within the window, with a token's attention
            to itself at index `x + attention_window / 2`. Weights involving globally-attending
            tokens are zeroed here and should be read from `global_attentions` instead.
        global_attentions (`tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Global attention weights after the softmax, one tensor per layer of shape
            `(batch_size, num_heads, sequence_length, x)`: attention from every token with global
            attention to every token in the sequence.
    """
    loss: Optional[mindspore.Tensor] = None
    logits: Optional[mindspore.Tensor] = None
    hidden_states: Optional[Tuple[mindspore.Tensor]] = None
    attentions: Optional[Tuple[mindspore.Tensor]] = None
    global_attentions: Optional[Tuple[mindspore.Tensor]] = None


@dataclass
class LongformerMultipleChoiceModelOutput(ModelOutput):
    """
    Base class for outputs of multiple choice Longformer models.

    Args:
        loss (`mindspore.Tensor` of shape *(1,)*, *optional*, returned when `labels` is provided):
            Classification loss.
        logits (`mindspore.Tensor` of shape `(batch_size, num_choices)`):
            *num_choices* is the second dimension of the input tensors. (see *input_ids* above).

            Classification scores (before SoftMax).
        hidden_states (`tuple(mindspore.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            One tensor for the embedding output plus one per layer, each of shape
            `(batch_size, sequence_length, hidden_size)`.
        attentions (`tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Local attention weights after the softmax, one tensor per layer of shape
            `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the
            number of tokens with global attention. The first `x` values are weights to the
            globally-attended tokens (fixed positions); the remaining `attention_window + 1` values
            are weights to tokens at relative positions within the window, with a token's attention
            to itself at index `x + attention_window / 2`. Weights involving globally-attending
            tokens are zeroed here and should be read from `global_attentions` instead.
        global_attentions (`tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Global attention weights after the softmax, one tensor per layer of shape
            `(batch_size, num_heads, sequence_length, x)`: attention from every token with global
            attention to every token in the sequence.
    """
    loss: Optional[mindspore.Tensor] = None
    logits: Optional[mindspore.Tensor] = None
    hidden_states: Optional[Tuple[mindspore.Tensor]] = None
    attentions: Optional[Tuple[mindspore.Tensor]] = None
    global_attentions: Optional[Tuple[mindspore.Tensor]] = None


@dataclass
class LongformerTokenClassifierOutput(ModelOutput):
    """
    Base class for outputs of token classification models.

    Args:
        loss (`mindspore.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Classification loss.
        logits (`mindspore.Tensor` of shape `(batch_size, sequence_length, config.num_labels)`):
            Classification scores (before SoftMax).
        hidden_states (`tuple(mindspore.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            One tensor for the embedding output plus one per layer, each of shape
            `(batch_size, sequence_length, hidden_size)`.
        attentions (`tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Local attention weights after the softmax, one tensor per layer of shape
            `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the
            number of tokens with global attention. The first `x` values are weights to the
            globally-attended tokens (fixed positions); the remaining `attention_window + 1` values
            are weights to tokens at relative positions within the window, with a token's attention
            to itself at index `x + attention_window / 2`. Weights involving globally-attending
            tokens are zeroed here and should be read from `global_attentions` instead.
        global_attentions (`tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Global attention weights after the softmax, one tensor per layer of shape
            `(batch_size, num_heads, sequence_length, x)`: attention from every token with global
            attention to every token in the sequence.
    """
    loss: Optional[mindspore.Tensor] = None
    logits: Optional[mindspore.Tensor] = None
    hidden_states: Optional[Tuple[mindspore.Tensor]] = None
    attentions: Optional[Tuple[mindspore.Tensor]] = None
    global_attentions: Optional[Tuple[mindspore.Tensor]] = None


def _get_question_end_index(input_ids, sep_token_id):
    """
    Computes the index of the first occurrence of `sep_token_id`.
    """
    sep_token_indices = (input_ids == sep_token_id).nonzero()
    batch_size = input_ids.shape[0]

    assert sep_token_indices.shape[1] == 2, "`input_ids` should have two dimensions"
    assert sep_token_indices.shape[0] == 3 * batch_size, (
        f"There should be exactly three separator tokens: {sep_token_id} in every sample for questions answering. You"
        " might also consider to set `global_attention_mask` manually in the forward function to avoid this error."
    )
    return sep_token_indices.view(batch_size, 3, 2)[:, 0, 1]


def _compute_global_attention_mask(input_ids, sep_token_id, before_sep_token=True):
    """
    Build a boolean mask marking tokens that should receive global attention.

    With `before_sep_token=True`, every token strictly before the first
    `sep_token_id` is marked; otherwise tokens at positions strictly greater
    than `question_end_index + 1` are marked.
    """
    # size: batch_size x 1
    question_end_index = _get_question_end_index(input_ids, sep_token_id).unsqueeze(dim=1)
    # Per-position indices, broadcast against the batch below.
    token_positions = ops.arange(input_ids.shape[1])

    if before_sep_token is True:
        return (token_positions.expand_as(input_ids) < question_end_index).to(mindspore.bool_)

    # last token is separation token and should not be counted and in the middle are two separation tokens
    after_question = (token_positions.expand_as(input_ids) > (question_end_index + 1)).to(mindspore.bool_)
    within_sequence = (token_positions.expand_as(input_ids) < input_ids.shape[-1]).to(mindspore.bool_)
    return after_question * within_sequence


def create_position_ids_from_input_ids(input_ids, padding_idx):
    """
    Replace non-padding symbols with their position numbers, starting at
    `padding_idx + 1`; padding symbols keep `padding_idx`. Modified from
    fairseq's `utils.make_positions`.

    Args:
        input_ids: mindspore.Tensor of token ids.
        padding_idx: id of the padding token.

    Returns:
        mindspore.Tensor of int64 position ids, same shape as `input_ids`.
    """
    # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
    is_real_token = input_ids.ne(padding_idx).int()
    # Cumulative count of real tokens gives 1-based positions; multiplying by
    # the mask zeroes out the padding slots again.
    positions = ops.cumsum(is_real_token, axis=1).astype(is_real_token.dtype) * is_real_token
    return positions.long() + padding_idx


class LongformerEmbeddings(nn.Cell):
    """
    Same as BertEmbeddings with a tiny tweak for positional embeddings indexing:
    position ids start at `padding_idx + 1` and padding positions keep `padding_idx`.
    """
    def __init__(self, config):
        """
        Build the word / position / token-type embedding tables plus the
        LayerNorm and dropout applied to their sum.

        Args:
            config: Longformer configuration providing `vocab_size`,
                `hidden_size`, `pad_token_id`, `type_vocab_size`,
                `layer_norm_eps`, `hidden_dropout_prob` and
                `max_position_embeddings`.
        """
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm([config.hidden_size], epsilon=config.layer_norm_eps)
        self.dropout = nn.Dropout(p=config.hidden_dropout_prob)

        self.padding_idx = config.pad_token_id
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
        )

    def construct(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
        """
        Sum word, position and token-type embeddings, then apply LayerNorm
        and dropout.

        One of `input_ids` / `inputs_embeds` must be provided; missing
        `position_ids` / `token_type_ids` are derived automatically.

        Returns:
            Tensor of shape (batch_size, sequence_length, hidden_size).
        """
        if position_ids is None:
            if input_ids is None:
                # No token ids to inspect: fall back to sequential positions.
                position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
            else:
                # Create the position ids from the input token ids; any padded
                # tokens keep the padding position id.
                position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx)

        input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape[:-1]

        if token_type_ids is None:
            token_type_ids = ops.zeros(input_shape, dtype=mindspore.int64)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        embeddings = (
            inputs_embeds
            + self.position_embeddings(position_ids)
            + self.token_type_embeddings(token_type_ids)
        )
        return self.dropout(self.LayerNorm(embeddings))

    def create_position_ids_from_inputs_embeds(self, inputs_embeds):
        """
        Generate sequential position ids for directly-provided embeddings;
        padding cannot be inferred here, so every position is treated as real.

        Args:
            inputs_embeds: mindspore.Tensor of shape (batch, seq_len, dim).

        Returns:
            mindspore.Tensor of int64 position ids, shape (batch, seq_len).
        """
        batch_shape = inputs_embeds.shape[:-1]
        seq_len = batch_shape[1]

        sequential_ids = ops.arange(
            self.padding_idx + 1, seq_len + self.padding_idx + 1, dtype=mindspore.int64
        )
        return sequential_ids.unsqueeze(0).broadcast_to(batch_shape)


class LongformerSelfAttention(nn.Cell):

    """
    This class represents the self-attention mechanism used in Longformer models. It handles the computation of attention scores and outputs for both local and global attention patterns, with support for
sliding window attention. Inherits from nn.Cell.
    
    The class includes methods for initializing the self-attention layer, constructing the attention mechanism, padding and processing hidden states, and computing attention outputs based on global indices. It
also provides functions for matrix multiplication with sliding window attention patterns and handling global attention indices.
    
    The LongformerSelfAttention class is designed to work seamlessly within Longformer models, ensuring efficient and accurate attention computations for both local and global contexts.
    
    For detailed information on each method and its functionality, refer to the specific method documentation within the class implementation.
    """
    def __init__(self, config, layer_id):
        """
        Build the sliding-window self-attention layer.

        Args:
            config: Longformer configuration object providing hidden_size,
                num_attention_heads, attention_probs_dropout_prob and the
                per-layer `attention_window` sequence.
            layer_id (int): index of this layer; selects its attention window.

        Raises:
            ValueError: if hidden_size is not divisible by num_attention_heads.
            AssertionError: if this layer's attention_window is odd or non-positive.
        """
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.num_heads = config.num_attention_heads
        self.head_dim = config.hidden_size // config.num_attention_heads
        self.embed_dim = config.hidden_size

        # projections for the local (sliding-window) attention
        self.query = nn.Dense(config.hidden_size, self.embed_dim)
        self.key = nn.Dense(config.hidden_size, self.embed_dim)
        self.value = nn.Dense(config.hidden_size, self.embed_dim)

        # separate projection layers for tokens with global attention
        self.query_global = nn.Dense(config.hidden_size, self.embed_dim)
        self.key_global = nn.Dense(config.hidden_size, self.embed_dim)
        self.value_global = nn.Dense(config.hidden_size, self.embed_dim)

        self.dropout = config.attention_probs_dropout_prob
        self.layer_id = layer_id

        window = config.attention_window[self.layer_id]
        assert window % 2 == 0, (
            f"`attention_window` for layer {self.layer_id} has to be an even value. Given {window}"
        )
        assert window > 0, (
            f"`attention_window` for layer {self.layer_id} has to be positive. Given {window}"
        )
        # each token attends this many positions to each side
        self.one_sided_attn_window_size = window // 2

        self.config = config

    def construct(
        self,
        hidden_states,
        attention_mask=None,
        layer_head_mask=None,
        is_index_masked=None,
        is_index_global_attn=None,
        is_global_attn=None,
        output_attentions=False,
    ):
        """
        [`LongformerSelfAttention`] expects *len(hidden_states)* to be multiple of *attention_window*. Padding to
        *attention_window* happens in [`LongformerModel.forward`] to avoid redoing the padding on each layer.

        The *attention_mask* is changed in [`LongformerModel.forward`] from 0, 1, 2 to:

            - -10000: no attention
            - 0: local attention
            - +10000: global attention

        Returns a tuple of `(attn_output,)`, optionally extended with the local
        attention probabilities and (when global attention is active) the global
        attention probabilities.
        """
        # (batch_size, seq_len, embed_dim) -> (seq_len, batch_size, embed_dim)
        hidden_states = hidden_states.swapaxes(0, 1)

        # project hidden states
        query_vectors = self.query(hidden_states)
        key_vectors = self.key(hidden_states)
        value_vectors = self.value(hidden_states)

        seq_len, batch_size, embed_dim = hidden_states.shape
        assert (
            embed_dim == self.embed_dim
        ), f"hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}"

        # normalize query
        query_vectors /= math.sqrt(self.head_dim)

        query_vectors = query_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).swapaxes(0, 1)
        key_vectors = key_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).swapaxes(0, 1)

        # banded local scores: (batch_size, seq_len, num_heads, 2*window + 1)
        attn_scores = self._sliding_chunks_query_key_matmul(
            query_vectors, key_vectors, self.one_sided_attn_window_size
        )

        # values to pad for attention probs
        remove_from_windowed_attention_mask = (attention_mask != 0)[:, :, None, None]

        # cast to fp32/fp16 then replace 1's with -inf
        float_mask = remove_from_windowed_attention_mask.astype(query_vectors.dtype).masked_fill(
            remove_from_windowed_attention_mask, float(np.finfo(mindspore.dtype_to_nptype(query_vectors.dtype)).min)
        )
        # diagonal mask with zeros everywhere and -inf inplace of padding
        diagonal_mask = self._sliding_chunks_query_key_matmul(
            float_mask.new_ones(size=float_mask.shape), float_mask, self.one_sided_attn_window_size
        )

        # pad local attention probs
        attn_scores += diagonal_mask

        assert list(attn_scores.shape) == [
            batch_size,
            seq_len,
            self.num_heads,
            self.one_sided_attn_window_size * 2 + 1,
        ], (
            f"local_attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads},"
            f" {self.one_sided_attn_window_size * 2 + 1}), but is of size {attn_scores.shape}"
        )

        # compute local attention probs from global attention keys and contact over window dim
        if is_global_attn:
            # compute global attn indices required through out forward fn
            (
                max_num_global_attn_indices,
                is_index_global_attn_nonzero,
                is_local_index_global_attn_nonzero,
                is_local_index_no_global_attn_nonzero,
            ) = self._get_global_attn_indices(is_index_global_attn)
            # calculate global attn probs from global key
            global_key_attn_scores = self._concat_with_global_key_attn_probs(
                query_vectors=query_vectors,
                key_vectors=key_vectors,
                max_num_global_attn_indices=max_num_global_attn_indices,
                is_index_global_attn_nonzero=is_index_global_attn_nonzero,
                is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
                is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
            )
            # concat to local_attn_probs
            # (batch_size, seq_len, num_heads, extra attention count + 2*window+1)
            attn_scores = ops.cat((global_key_attn_scores, attn_scores), axis=-1)

            # free memory
            del global_key_attn_scores

        attn_probs = ops.softmax(
            attn_scores, axis=-1, dtype=mindspore.float32
        )  # use fp32 for numerical stability

        if layer_head_mask is not None:
            assert layer_head_mask.shape == (
                self.num_heads,
            ), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.shape}"
            attn_probs = layer_head_mask.view(1, 1, -1, 1) * attn_probs

        # softmax sometimes inserts NaN if all positions are masked, replace them with 0
        attn_probs = attn_probs.masked_fill(is_index_masked[:, :, None, None], 0.0)
        attn_probs = attn_probs.astype(attn_scores.dtype)

        # free memory
        del attn_scores

        # apply dropout
        attn_probs = ops.dropout(attn_probs, p=self.dropout, training=self.training)

        value_vectors = value_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).swapaxes(0, 1)

        # compute local attention output with global attention value and add
        if is_global_attn:
            # compute sum of global and local attn
            attn_output = self._compute_attn_output_with_global_indices(
                value_vectors=value_vectors,
                attn_probs=attn_probs,
                max_num_global_attn_indices=max_num_global_attn_indices,
                is_index_global_attn_nonzero=is_index_global_attn_nonzero,
                is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
            )
        else:
            # compute local attn only
            attn_output = self._sliding_chunks_matmul_attn_probs_value(
                attn_probs, value_vectors, self.one_sided_attn_window_size
            )

        assert attn_output.shape == (batch_size, seq_len, self.num_heads, self.head_dim), "Unexpected size"
        attn_output = attn_output.swapaxes(0, 1).reshape(seq_len, batch_size, embed_dim)

        # compute value for global attention and overwrite to attention output
        # TODO: remove the redundant computation
        if is_global_attn:
            global_attn_output, global_attn_probs = self._compute_global_attn_output_from_hidden(
                hidden_states=hidden_states,
                max_num_global_attn_indices=max_num_global_attn_indices,
                layer_head_mask=layer_head_mask,
                is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
                is_index_global_attn_nonzero=is_index_global_attn_nonzero,
                is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
                is_index_masked=is_index_masked,
            )

            # get only non zero global attn output
            nonzero_global_attn_output = global_attn_output[
                is_local_index_global_attn_nonzero[0], :, is_local_index_global_attn_nonzero[1]
            ]

            # overwrite values with global attention
            attn_output[is_index_global_attn_nonzero[::-1]] = nonzero_global_attn_output.view(
                len(is_local_index_global_attn_nonzero[0]), -1
            )
            # The attention weights for tokens with global attention are
            # just filler values, they were never used to compute the output.
            # Fill with 0 now, the correct values are in 'global_attn_probs'.
            attn_probs[is_index_global_attn_nonzero] = 0

        # swap back to (batch_size, seq_len, embed_dim) before returning
        outputs = (attn_output.swapaxes(0, 1),)

        if output_attentions:
            outputs += (attn_probs,)

        # `+` binds tighter than the conditional: global probs are appended only
        # when both global attention is active and attentions were requested
        return outputs + (global_attn_probs,) if (is_global_attn and output_attentions) else outputs

    @staticmethod
    def _pad_and_swapaxes_last_two_dims(hidden_states_padded, padding):
        """pads rows and then flips rows and columns"""
        # padding value is irrelevant: the padded slots are overwritten downstream
        padded = ops.pad(hidden_states_padded, padding)
        *leading_dims, dim_a, dim_b = padded.shape
        # reinterpret the buffer with the last two sizes exchanged (a view, not a transpose)
        return padded.view(*leading_dims, dim_b, dim_a)

    @staticmethod
    def _pad_and_diagonalize(chunked_hidden_states):
        """
        shift every row 1 step right, converting columns into diagonals.

        Example:

        ```python
        chunked_hidden_states: [
            0.4983,
            2.6918,
            -0.0071,
            1.0492,
            -1.8348,
            0.7672,
            0.2986,
            0.0285,
            -0.7584,
            0.4206,
            -0.0405,
            0.1599,
            2.0514,
            -1.1600,
            0.5372,
            0.2629,
        ]
        window_overlap = num_rows = 4
        ```

                     (pad & diagonalize) => [ 0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000
                       0.0000, -1.8348, 0.7672, 0.2986, 0.0285, 0.0000, 0.0000 0.0000, 0.0000, -0.7584, 0.4206,
                       -0.0405, 0.1599, 0.0000 0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629 ]
        """
        total_num_heads, num_chunks, window_overlap, hidden_dim = chunked_hidden_states.shape
        # pad the last dim; flattening + re-slicing below turns this padding into the per-row shift
        chunked_hidden_states = ops.pad(
            chunked_hidden_states, (0, window_overlap + 1)
        )  # total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1). Padding value is not important because it'll be overwritten
        chunked_hidden_states = chunked_hidden_states.view(
            total_num_heads, num_chunks, -1
        )  # total_num_heads x num_chunks x window_overlap*window_overlap+window_overlap
        chunked_hidden_states = chunked_hidden_states[
            :, :, :-window_overlap
        ]  # total_num_heads x num_chunks x window_overlap*window_overlap
        chunked_hidden_states = chunked_hidden_states.view(
            total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim
        )
        # drop the last (padded) element of each row, leaving the diagonalized layout
        chunked_hidden_states = chunked_hidden_states[:, :, :, :-1]
        return chunked_hidden_states

    @staticmethod
    def _chunk(hidden_states, window_overlap, onnx_export: bool = False):
        """
        Convert into overlapping chunks. Chunk size = 2w, overlap size = w.

        Args:
            hidden_states: tensor of shape (batch*heads, seq_len, head_dim);
                seq_len is expected to be a multiple of 2 * window_overlap.
            window_overlap (int): one-sided window size w.
            onnx_export (bool): if True, use the slower copy-based loop instead
                of the strided view (as_strided is not ONNX-exportable).

        Returns:
            Tensor of shape (batch*heads, seq_len // w - 1, 2*w, head_dim).
        """
        if not onnx_export:
            # non-overlapping chunks of size = 2w
            hidden_states = hidden_states.view(
                hidden_states.shape[0],
                scalar_div(hidden_states.shape[1], (window_overlap * 2), rounding_mode="trunc"),
                window_overlap * 2,
                hidden_states.shape[2],
            )
            # use `as_strided` to make the chunks overlap with an overlap size = window_overlap
            chunk_size = list(hidden_states.shape)
            chunk_size[1] = chunk_size[1] * 2 - 1

            chunk_stride = list(hidden_states.stride())
            chunk_stride[1] = chunk_stride[1] // 2
            return hidden_states.as_strided(size=chunk_size, stride=chunk_stride)

        # When exporting to ONNX, use this separate logic
        # have to use slow implementation since as_strided, unfold and 2d-tensor indexing aren't supported (yet) in ONNX export

        # TODO replace this with
        # > return hidden_states.unfold(dimension=1, size=window_overlap * 2, step=window_overlap).swapaxes(2, 3)
        # once `unfold` is supported
        # the case hidden_states.shape[1] == window_overlap * 2 can also simply return hidden_states.unsqueeze(1), but that's control flow

        chunk_size = [
            hidden_states.shape[0],
            scalar_div(hidden_states.shape[1], window_overlap, rounding_mode="trunc") - 1,
            window_overlap * 2,
            hidden_states.shape[2],
        ]

        # bugfix: `mindspore.zeros` is not a MindSpore API (this path raised
        # AttributeError); use `ops.zeros` as done elsewhere in this file, and
        # keep the input dtype so the copies below do not change precision
        overlapping_chunks = ops.zeros(tuple(chunk_size), dtype=hidden_states.dtype)
        for chunk in range(chunk_size[1]):
            overlapping_chunks[:, chunk, :, :] = hidden_states[
                :, chunk * window_overlap : chunk * window_overlap + 2 * window_overlap, :
            ]
        return overlapping_chunks

    @staticmethod
    def _mask_invalid_locations(input_tensor, affected_seq_len) -> None:
        """
        Mask out, in place, banded-score slots that fall outside the sequence.

        The first and last `affected_seq_len` sequence positions contain window
        slots that would reach before the start / past the end of the sequence;
        a triangular mask overwrites those slots with -inf so softmax ignores them.

        Args:
            input_tensor (mindspore.Tensor): banded attention scores; indexed here
                as (batch, seq_len, heads, window) and modified in place.
            affected_seq_len (int): number of positions at each end of the
                sequence whose edge window slots are invalid.

        Returns:
            None: `input_tensor` is mutated in place; nothing is returned.
        """
        # lower-triangular mask, flipped so the -inf region hugs the sequence start
        beginning_mask_2d = input_tensor.new_ones((affected_seq_len, affected_seq_len + 1)).tril().flip(dims=[0])
        beginning_mask = beginning_mask_2d[None, :, None, :]
        # the end-of-sequence mask is the mirror image of the beginning mask
        ending_mask = beginning_mask.flip(dims=(1, 3))
        beginning_input = input_tensor[:, :affected_seq_len, :, : affected_seq_len + 1]
        beginning_mask = beginning_mask.broadcast_to(beginning_input.shape)
        input_tensor[:, :affected_seq_len, :, : affected_seq_len + 1] = ops.full_like(
            beginning_input, -float("inf")
        ).where(beginning_mask.bool(), beginning_input)
        ending_input = input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1) :]
        ending_mask = ending_mask.broadcast_to(ending_input.shape)
        input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1) :] = ops.full_like(
            ending_input, -float("inf")
        ).where(ending_mask.bool(), ending_input)
    def _sliding_chunks_query_key_matmul(self, query: mindspore.Tensor, key: mindspore.Tensor, window_overlap: int):
        """
        Matrix multiplication of query and key tensors using with a sliding window attention pattern. This
        implementation splits the input into overlapping chunks of size 2w (e.g. 512 for pretrained Longformer) with an
        overlap of size window_overlap

        Args:
            query (mindspore.Tensor): shape (batch_size, seq_len, num_heads, head_dim).
            key (mindspore.Tensor): same shape as `query`.
            window_overlap (int): one-sided window size w; seq_len must be a multiple of 2w.

        Returns:
            mindspore.Tensor: banded scores of shape (batch_size, seq_len, num_heads, 2*w + 1).
        """
        batch_size, seq_len, num_heads, head_dim = query.shape
        assert (
            seq_len % (window_overlap * 2) == 0
        ), f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}"
        assert query.shape == key.shape

        chunks_count = scalar_div(seq_len, window_overlap, rounding_mode="trunc") - 1

        # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size window_overlap * 2
        query = query.swapaxes(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
        key = key.swapaxes(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)

        query = self._chunk(query, window_overlap, getattr(self.config, "onnx_export", False))
        key = self._chunk(key, window_overlap, getattr(self.config, "onnx_export", False))

        # matrix multiplication
        # bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim
        # bcyd: batch_size * num_heads x chunks x 2window_overlap x head_dim
        # bcxy: batch_size * num_heads x chunks x 2window_overlap x 2window_overlap
        diagonal_chunked_attention_scores = ops.einsum("bcxd,bcyd->bcxy", (query, key))  # multiply

        # convert diagonals into columns
        diagonal_chunked_attention_scores = self._pad_and_swapaxes_last_two_dims(
            diagonal_chunked_attention_scores, padding=(0, 0, 0, 1)
        )

        # allocate space for the overall attention matrix where the chunks are combined. The last dimension
        # has (window_overlap * 2 + 1) columns. The first (window_overlap) columns are the window_overlap lower triangles (attention from a word to
        # window_overlap previous words). The following column is attention score from each word to itself, then
        # followed by window_overlap columns for the upper triangle.

        diagonal_attention_scores = diagonal_chunked_attention_scores.new_zeros(
            (batch_size * num_heads, chunks_count + 1, window_overlap, window_overlap * 2 + 1)
        )

        # copy parts from diagonal_chunked_attention_scores into the combined matrix of attentions
        # - copying the main diagonal and the upper triangle
        diagonal_attention_scores[:, :-1, :, window_overlap:] = diagonal_chunked_attention_scores[
            :, :, :window_overlap, : window_overlap + 1
        ]
        diagonal_attention_scores[:, -1, :, window_overlap:] = diagonal_chunked_attention_scores[
            :, -1, window_overlap:, : window_overlap + 1
        ]
        # - copying the lower triangle
        diagonal_attention_scores[:, 1:, :, :window_overlap] = diagonal_chunked_attention_scores[
            :, :, -(window_overlap + 1) : -1, window_overlap + 1 :
        ]

        # first chunk has no preceding chunk; fill its partial lower triangle separately
        diagonal_attention_scores[:, 0, 1:window_overlap, 1:window_overlap] = diagonal_chunked_attention_scores[
            :, 0, : window_overlap - 1, 1 - window_overlap :
        ]

        # separate batch_size and num_heads dimensions again
        diagonal_attention_scores = diagonal_attention_scores.view(
            batch_size, num_heads, seq_len, 2 * window_overlap + 1
        ).swapaxes(2, 1)

        # in-place: -inf the window slots that fall before the start / past the end of the sequence
        self._mask_invalid_locations(diagonal_attention_scores, window_overlap)
        return diagonal_attention_scores

    def _sliding_chunks_matmul_attn_probs_value(
        self, attn_probs: mindspore.Tensor, value: mindspore.Tensor, window_overlap: int
    ):
        """
        Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors. Returned tensor will be of the
        same shape as `attn_probs`

        Args:
            attn_probs (mindspore.Tensor): banded probabilities, shape
                (batch_size, seq_len, num_heads, 2*w + 1).
            value (mindspore.Tensor): value vectors, shape (batch_size, seq_len, num_heads, head_dim).
            window_overlap (int): one-sided window size w; seq_len must be a multiple of 2w.

        Returns:
            mindspore.Tensor: context of shape (batch_size, seq_len, num_heads, head_dim).
        """
        batch_size, seq_len, num_heads, head_dim = value.shape

        assert seq_len % (window_overlap * 2) == 0
        assert attn_probs.shape[:3] == value.shape[:3]
        assert attn_probs.shape[3] == 2 * window_overlap + 1
        chunks_count = scalar_div(seq_len, window_overlap, rounding_mode="trunc") - 1
        # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size 2 window overlap

        chunked_attn_probs = attn_probs.swapaxes(1, 2).reshape(
            batch_size * num_heads,
            scalar_div(seq_len, window_overlap, rounding_mode="trunc"),
            window_overlap,
            2 * window_overlap + 1,
        )

        # group batch_size and num_heads dimensions into one
        value = value.swapaxes(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)

        # pad seq_len with w at the beginning of the sequence and another window overlap at the end
        padded_value = ops.pad(value, (0, 0, window_overlap, window_overlap), value=-1)

        # chunk padded_value into chunks of size 3 window overlap and an overlap of size window overlap
        chunked_value_size = (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim)
        chunked_value_stride = padded_value.stride()
        chunked_value_stride = (
            chunked_value_stride[0],
            window_overlap * chunked_value_stride[1],
            chunked_value_stride[1],
            chunked_value_stride[2],
        )
        chunked_value = padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride)

        # shift rows so the stored diagonals line up with the overlapping value chunks
        chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs)

        context = ops.einsum("bcwd,bcdh->bcwh", (chunked_attn_probs, chunked_value))
        return context.view(batch_size, num_heads, seq_len, head_dim).swapaxes(1, 2)

    @staticmethod
    def _get_global_attn_indices(is_index_global_attn):
        """compute global attn indices required throughout forward pass"""
        # number of global-attention tokens in each batch row
        num_global_per_row = is_index_global_attn.long().sum(axis=1)

        # largest such count across the batch (python int)
        max_num_global_attn_indices = num_global_per_row.max().item()

        # positions of global-attention tokens within the padded sequence
        is_index_global_attn_nonzero = is_index_global_attn.nonzero(as_tuple=True)

        # per row, the first `count` of the max slots are real, the rest are padding
        is_local_index_global_attn = (
            ops.arange(max_num_global_attn_indices) < num_global_per_row.unsqueeze(dim=-1)
        )

        # location of the non-padding values within global attention indices
        is_local_index_global_attn_nonzero = is_local_index_global_attn.nonzero(as_tuple=True)

        # location of the padding values within global attention indices
        is_local_index_no_global_attn_nonzero = (is_local_index_global_attn == 0).nonzero(as_tuple=True)

        return (
            max_num_global_attn_indices,
            is_index_global_attn_nonzero,
            is_local_index_global_attn_nonzero,
            is_local_index_no_global_attn_nonzero,
        )

    def _concat_with_global_key_attn_probs(
        self,
        key_vectors,
        query_vectors,
        max_num_global_attn_indices,
        is_index_global_attn_nonzero,
        is_local_index_global_attn_nonzero,
        is_local_index_no_global_attn_nonzero,
    ):
        """
        Score every query against the global-attention keys only.

        Args:
            key_vectors (mindspore.Tensor): local key vectors, shape
                (batch_size, seq_len, num_heads, head_dim).
            query_vectors (mindspore.Tensor): query vectors, same shape as `key_vectors`.
            max_num_global_attn_indices (int): max number of global tokens in any batch row.
            is_index_global_attn_nonzero (tuple): indices of global tokens within the sequence.
            is_local_index_global_attn_nonzero (tuple): indices of the non-padding slots
                among the gathered global tokens.
            is_local_index_no_global_attn_nonzero (tuple): indices of padding slots among
                the gathered global tokens; their scores are set to the dtype minimum
                so softmax effectively zeroes them.

        Returns:
            mindspore.Tensor: scores of shape
                (batch_size, seq_len, num_heads, max_num_global_attn_indices).
        """
        batch_size = key_vectors.shape[0]

        # create only global key vectors
        key_vectors_only_global = key_vectors.new_zeros(
            (batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim)
        )
        key_vectors_only_global[is_local_index_global_attn_nonzero] = key_vectors[is_index_global_attn_nonzero]

        # (batch_size, seq_len, num_heads, max_num_global_attn_indices)
        attn_probs_from_global_key = ops.einsum("blhd,bshd->blhs", (query_vectors, key_vectors_only_global))

        # need to swapaxes since ONNX export only supports consecutive indexing: https://pytorch.org/docs/stable/onnx.html#writes-sets
        attn_probs_from_global_key = attn_probs_from_global_key.swapaxes(1, 3)
        if 0 not in is_local_index_no_global_attn_nonzero[0].shape:
            attn_probs_from_global_key[
                is_local_index_no_global_attn_nonzero[0], is_local_index_no_global_attn_nonzero[1], :, :
            ] = float(np.finfo(mindspore.dtype_to_nptype(attn_probs_from_global_key.dtype)).min)
        attn_probs_from_global_key = attn_probs_from_global_key.swapaxes(1, 3)

        return attn_probs_from_global_key

    def _compute_attn_output_with_global_indices(
        self,
        value_vectors,
        attn_probs,
        max_num_global_attn_indices,
        is_index_global_attn_nonzero,
        is_local_index_global_attn_nonzero,
    ):
        """
        Combine attention over global value vectors with the sliding-window output.

        The first `max_num_global_attn_indices` entries of `attn_probs`' last axis
        are the global-key columns (see `_concat_with_global_key_attn_probs`); the
        remainder are the local window columns.

        Args:
            value_vectors (mindspore.Tensor): shape (batch_size, seq_len, num_heads, head_dim).
            attn_probs (mindspore.Tensor): attention probabilities with global columns
                first along the last axis.
            max_num_global_attn_indices (int): max number of global tokens in any batch row.
            is_index_global_attn_nonzero (tuple): indices of global tokens within the sequence.
            is_local_index_global_attn_nonzero (tuple): indices of the non-padding slots
                among the gathered global tokens.

        Returns:
            mindspore.Tensor: attention output of shape
                (batch_size, seq_len, num_heads, head_dim) — sum of the global and
                local contributions. (Inputs are not modified in place.)
        """
        batch_size = attn_probs.shape[0]

        # cut local attn probs to global only
        attn_probs_only_global = attn_probs.narrow(-1, 0, max_num_global_attn_indices)
        # get value vectors for global only
        value_vectors_only_global = value_vectors.new_zeros(
            (batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim)
        )
        value_vectors_only_global[is_local_index_global_attn_nonzero] = value_vectors[is_index_global_attn_nonzero]

        # use `matmul` because `einsum` crashes sometimes with fp16
        # attn = torch.einsum('blhs,bshd->blhd', (selected_attn_probs, selected_v))
        # compute attn output only global
        attn_output_only_global = ops.matmul(
            attn_probs_only_global.swapaxes(1, 2).copy(), value_vectors_only_global.swapaxes(1, 2).copy()
        ).swapaxes(1, 2)

        # reshape attn probs
        attn_probs_without_global = attn_probs.narrow(
            -1, max_num_global_attn_indices, attn_probs.shape[-1] - max_num_global_attn_indices
        )

        # compute attn output with global
        attn_output_without_global = self._sliding_chunks_matmul_attn_probs_value(
            attn_probs_without_global, value_vectors, self.one_sided_attn_window_size
        )
        return attn_output_only_global + attn_output_without_global

    def _compute_global_attn_output_from_hidden(
        self,
        hidden_states,
        max_num_global_attn_indices,
        layer_head_mask,
        is_local_index_global_attn_nonzero,
        is_index_global_attn_nonzero,
        is_local_index_no_global_attn_nonzero,
        is_index_masked,
    ):
        '''
        Method: _compute_global_attn_output_from_hidden
        
        Computes the global attention output from the hidden states.
        
        Args:
            self (LongformerSelfAttention): The instance of the LongformerSelfAttention class.
            hidden_states (Tensor): The input hidden states of shape (seq_len, batch_size).
            max_num_global_attn_indices (int): The maximum number of global attention indices.
            layer_head_mask (Tensor): The mask for each layer head of shape (self.num_heads,).
            is_local_index_global_attn_nonzero (Tensor): Boolean tensor indicating if local index in global attention is non-zero, of shape (seq_len, batch_size).
            is_index_global_attn_nonzero (Tensor): Boolean tensor indicating if global attention index is non-zero, of shape (max_num_global_attn_indices, batch_size).
            is_local_index_no_global_attn_nonzero (Tensor): Boolean tensor indicating if local index in no global attention is non-zero, of shape (seq_len, batch_size).
            is_index_masked (Tensor): Boolean tensor indicating if index is masked, of shape (seq_len,).
        
        Returns:
            global_attn_output (Tensor): The output tensor of global attention, of shape (batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim).
            global_attn_probs (Tensor): The tensor of global attention probabilities, of shape (batch_size, self.num_heads, max_num_global_attn_indices, seq_len).
        
        Raises:
            AssertionError: If the shape of global_attn_scores is incorrect.
            AssertionError: If the shape of layer_head_mask is incorrect.
            AssertionError: If the shape of global_attn_output tensor is incorrect.
        '''
        seq_len, batch_size = hidden_states.shape[:2]

        # prepare global hidden states
        global_attn_hidden_states = hidden_states.new_zeros((max_num_global_attn_indices, batch_size, self.embed_dim))
        global_attn_hidden_states[is_local_index_global_attn_nonzero[::-1]] = hidden_states[
            is_index_global_attn_nonzero[::-1]
        ]

        # global key, query, value
        global_query_vectors_only_global = self.query_global(global_attn_hidden_states)
        global_key_vectors = self.key_global(hidden_states)
        global_value_vectors = self.value_global(hidden_states)

        # normalize
        global_query_vectors_only_global /= math.sqrt(self.head_dim)

        # reshape
        global_query_vectors_only_global = (
            global_query_vectors_only_global
            .view(max_num_global_attn_indices, batch_size * self.num_heads, self.head_dim)
            .swapaxes(0, 1)
        )  # (batch_size * self.num_heads, max_num_global_attn_indices, head_dim)
        global_key_vectors = (
            global_key_vectors.view(-1, batch_size * self.num_heads, self.head_dim).swapaxes(0, 1)
        )  # batch_size * self.num_heads, seq_len, head_dim)
        global_value_vectors = (
            global_value_vectors.view(-1, batch_size * self.num_heads, self.head_dim).swapaxes(0, 1)
        )  # batch_size * self.num_heads, seq_len, head_dim)

        # compute attn scores
        global_attn_scores = ops.bmm(global_query_vectors_only_global, global_key_vectors.swapaxes(1, 2))

        assert list(global_attn_scores.shape) == [
            batch_size * self.num_heads,
            max_num_global_attn_indices,
            seq_len,
        ], (
            "global_attn_scores have the wrong size. Size should be"
            f" {(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)}, but is"
            f" {global_attn_scores.shape}."
        )

        global_attn_scores = global_attn_scores.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len)

        # need to swapaxes since ONNX export only supports consecutive indexing: https://pytorch.org/docs/stable/onnx.html#writes-sets
        global_attn_scores = global_attn_scores.swapaxes(1, 2)
        if 0 not in is_local_index_no_global_attn_nonzero[0].shape:
            global_attn_scores[
                is_local_index_no_global_attn_nonzero[0], is_local_index_no_global_attn_nonzero[1], :, :
            ] = float(np.finfo(mindspore.dtype_to_nptype(global_attn_scores.dtype)).min)
        global_attn_scores = global_attn_scores.swapaxes(1, 2)

        global_attn_scores = global_attn_scores.masked_fill(
            is_index_masked[:, None, None, :],
            float(np.finfo(mindspore.dtype_to_nptype(global_attn_scores.dtype)).min),
        )

        global_attn_scores = global_attn_scores.view(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)

        # compute global attn probs
        global_attn_probs_float = ops.softmax(
            global_attn_scores, axis=-1, dtype=mindspore.float32
        )  # use fp32 for numerical stability

        # apply layer head masking
        if layer_head_mask is not None:
            assert layer_head_mask.shape == (
                self.num_heads,
            ), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.shape}"
            global_attn_probs_float = layer_head_mask.view(1, -1, 1, 1) * global_attn_probs_float.view(
                batch_size, self.num_heads, max_num_global_attn_indices, seq_len
            )
            global_attn_probs_float = global_attn_probs_float.view(
                batch_size * self.num_heads, max_num_global_attn_indices, seq_len
            )

        global_attn_probs = ops.dropout(
            global_attn_probs_float.astype(global_attn_scores.dtype), p=self.dropout, training=self.training
        )

        # global attn output
        global_attn_output = ops.bmm(global_attn_probs, global_value_vectors)

        assert list(global_attn_output.shape) == [
            batch_size * self.num_heads,
            max_num_global_attn_indices,
            self.head_dim,
        ], (
            "global_attn_output tensor has the wrong size. Size should be"
            f" {(batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim)}, but is"
            f" {global_attn_output.shape}."
        )

        global_attn_probs = global_attn_probs.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len)
        global_attn_output = global_attn_output.view(
            batch_size, self.num_heads, max_num_global_attn_indices, self.head_dim
        )
        return global_attn_output, global_attn_probs


# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
class LongformerSelfOutput(nn.Cell):
    """Post-attention projection block.

    Applies a hidden-size-preserving dense projection and dropout to the
    attention output, adds the residual connection, and normalizes the sum
    with LayerNorm.

    Attributes:
        dense (nn.Dense): hidden_size -> hidden_size projection.
        LayerNorm (nn.LayerNorm): normalization applied after the residual add.
        dropout (nn.Dropout): dropout applied to the projected states.
    """
    def __init__(self, config):
        """Build the projection, normalization and dropout sub-layers.

        Args:
            config: model configuration providing ``hidden_size``,
                ``layer_norm_eps`` and ``hidden_dropout_prob``.
        """
        super().__init__()
        self.dense = nn.Dense(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm([config.hidden_size], epsilon=config.layer_norm_eps)
        self.dropout = nn.Dropout(p=config.hidden_dropout_prob)

    def construct(self, hidden_states: mindspore.Tensor, input_tensor: mindspore.Tensor) -> mindspore.Tensor:
        """Project and regularize ``hidden_states``, then LayerNorm the residual sum.

        Args:
            hidden_states (mindspore.Tensor): output of the self-attention sub-layer.
            input_tensor (mindspore.Tensor): residual input added before normalization.

        Returns:
            mindspore.Tensor: ``LayerNorm(dropout(dense(hidden_states)) + input_tensor)``.
        """
        projected = self.dense(hidden_states)
        projected = self.dropout(projected)
        return self.LayerNorm(projected + input_tensor)


class LongformerAttention(nn.Cell):
    """Complete Longformer attention block for one layer.

    Combines the windowed/global self-attention (``LongformerSelfAttention``)
    with the post-attention projection (``LongformerSelfOutput``), and keeps
    track of pruned attention heads.

    Attributes:
        self (LongformerSelfAttention): computes the attention outputs.
        output (LongformerSelfOutput): dense + dropout + residual LayerNorm.
        pruned_heads (set): indices of heads pruned so far.
    """
    def __init__(self, config, layer_id=0):
        """Create the self-attention and output sub-modules.

        Args:
            config: model configuration forwarded to the sub-modules.
            layer_id (int): index of this layer inside the encoder stack
                (used by the self-attention to pick its window size). Defaults to 0.
        """
        super().__init__()
        self.self = LongformerSelfAttention(config, layer_id)
        self.output = LongformerSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Prune the given attention heads from this block in place.

        Removes the corresponding rows from the query/key/value projections,
        the corresponding columns from the output projection, and updates the
        head bookkeeping. A no-op when ``heads`` is empty.

        Args:
            heads: iterable of head indices to remove.
        """
        if not heads:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Shrink the linear projections to drop the pruned heads.
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, axis=1)

        # Keep head counts and sizes in sync with the smaller projections.
        self.self.num_attention_heads -= len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def construct(
        self,
        hidden_states,
        attention_mask=None,
        layer_head_mask=None,
        is_index_masked=None,
        is_index_global_attn=None,
        is_global_attn=None,
        output_attentions=False,
    ):
        """Run self-attention followed by the output projection with residual.

        Args:
            hidden_states: input hidden states for this layer.
            attention_mask: optional mask forwarded to the self-attention. Defaults to None.
            layer_head_mask: optional per-head mask for this layer. Defaults to None.
            is_index_masked: optional boolean mask of padded/masked positions. Defaults to None.
            is_index_global_attn: optional boolean mask of global-attention positions. Defaults to None.
            is_global_attn: optional flag indicating any global attention is present. Defaults to None.
            output_attentions (bool): whether the self-attention returns attention
                probabilities as extra tuple entries. Defaults to False.

        Returns:
            tuple: ``(attention_output, *extras)`` where ``extras`` are whatever
            additional outputs (e.g. attention probabilities) the self-attention produced.
        """
        self_outputs = self.self(
            hidden_states,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            is_index_masked=is_index_masked,
            is_index_global_attn=is_index_global_attn,
            is_global_attn=is_global_attn,
            output_attentions=output_attentions,
        )
        attn_output = self.output(self_outputs[0], hidden_states)
        return (attn_output,) + self_outputs[1:]


# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class LongformerIntermediate(nn.Cell):
    """Feed-forward expansion sub-layer.

    Projects hidden states from ``hidden_size`` up to ``intermediate_size``
    and applies the configured activation function.

    Attributes:
        dense (nn.Dense): hidden_size -> intermediate_size projection.
        intermediate_act_fn (callable): activation applied after the projection.
    """
    def __init__(self, config):
        """Build the up-projection and resolve the activation.

        Args:
            config: model configuration providing ``hidden_size``,
                ``intermediate_size`` and ``hidden_act`` (either a string key
                into ``ACT2FN`` or a callable).
        """
        super().__init__()
        self.dense = nn.Dense(config.hidden_size, config.intermediate_size)
        # hidden_act may be a string name or already a callable activation.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def construct(self, hidden_states: mindspore.Tensor) -> mindspore.Tensor:
        """Apply the up-projection followed by the activation.

        Args:
            hidden_states (mindspore.Tensor): input hidden states.

        Returns:
            mindspore.Tensor: activated, up-projected hidden states.
        """
        return self.intermediate_act_fn(self.dense(hidden_states))


# Copied from transformers.models.bert.modeling_bert.BertOutput
class LongformerOutput(nn.Cell):
    """Feed-forward output sub-layer.

    Projects the intermediate states back down to ``hidden_size``, applies
    dropout, adds the residual connection, and normalizes with LayerNorm.

    Attributes:
        dense (nn.Dense): intermediate_size -> hidden_size projection.
        LayerNorm (nn.LayerNorm): normalization applied after the residual add.
        dropout (nn.Dropout): dropout applied to the projected states.
    """
    def __init__(self, config):
        """Build the down-projection, normalization and dropout sub-layers.

        Args:
            config: model configuration providing ``intermediate_size``,
                ``hidden_size``, ``layer_norm_eps`` and ``hidden_dropout_prob``.
        """
        super().__init__()
        self.dense = nn.Dense(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm([config.hidden_size], epsilon=config.layer_norm_eps)
        self.dropout = nn.Dropout(p=config.hidden_dropout_prob)

    def construct(self, hidden_states: mindspore.Tensor, input_tensor: mindspore.Tensor) -> mindspore.Tensor:
        """Down-project and regularize ``hidden_states``, then LayerNorm the residual sum.

        Args:
            hidden_states (mindspore.Tensor): output of the intermediate sub-layer.
            input_tensor (mindspore.Tensor): residual input added before normalization.

        Returns:
            mindspore.Tensor: ``LayerNorm(dropout(dense(hidden_states)) + input_tensor)``.
        """
        projected = self.dense(hidden_states)
        projected = self.dropout(projected)
        return self.LayerNorm(projected + input_tensor)


class LongformerLayer(nn.Cell):
    """A single Longformer encoder layer.

    Runs Longformer attention and then the feed-forward network
    (intermediate + output), with the feed-forward pass optionally chunked
    along the sequence dimension to reduce peak memory.

    Attributes:
        attention (LongformerAttention): the attention block.
        intermediate (LongformerIntermediate): feed-forward up-projection + activation.
        output (LongformerOutput): feed-forward down-projection + residual LayerNorm.
        chunk_size_feed_forward (int): chunk size for feed-forward chunking (0 disables it).
        seq_len_dim (int): dimension along which feed-forward chunking splits (sequence dim).
    """
    def __init__(self, config, layer_id=0):
        """Create the attention and feed-forward sub-modules.

        Args:
            config: model configuration (an instance of ``LongformerConfig``).
            layer_id (int): index of this layer inside the encoder stack. Defaults to 0.
        """
        super().__init__()
        self.attention = LongformerAttention(config, layer_id)
        self.intermediate = LongformerIntermediate(config)
        self.output = LongformerOutput(config)
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        # Feed-forward chunking splits along the sequence (dim 1).
        self.seq_len_dim = 1

    def construct(
        self,
        hidden_states,
        attention_mask=None,
        layer_head_mask=None,
        is_index_masked=None,
        is_index_global_attn=None,
        is_global_attn=None,
        output_attentions=False,
    ):
        """Apply attention, then the (possibly chunked) feed-forward network.

        Args:
            hidden_states: input hidden states for this layer.
            attention_mask: optional attention mask forwarded to the attention block. Defaults to None.
            layer_head_mask: optional per-head mask for this layer. Defaults to None.
            is_index_masked: optional boolean mask of masked positions. Defaults to None.
            is_index_global_attn: optional boolean mask of global-attention positions. Defaults to None.
            is_global_attn: optional flag indicating any global attention is present. Defaults to None.
            output_attentions (bool): whether attention probabilities are included
                in the extra outputs. Defaults to False.

        Returns:
            tuple: ``(layer_output, *extras)`` where ``extras`` are the attention
            block's additional outputs.
        """
        self_attn_outputs = self.attention(
            hidden_states,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            is_index_masked=is_index_masked,
            is_index_global_attn=is_index_global_attn,
            is_global_attn=is_global_attn,
            output_attentions=output_attentions,
        )
        attn_output = self_attn_outputs[0]
        extras = self_attn_outputs[1:]

        # Feed-forward pass, chunked along the sequence dimension if configured.
        layer_output = apply_chunking_to_forward(
            self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attn_output
        )
        return (layer_output,) + extras

    def ff_chunk(self, attn_output):
        """Run the feed-forward network on one chunk of the attention output.

        Args:
            attn_output: (a chunk of) the attention block's output.

        Returns:
            The feed-forward output with the residual connection applied.
        """
        return self.output(self.intermediate(attn_output), attn_output)


class LongformerEncoder(nn.Cell):
    """Stack of ``config.num_hidden_layers`` Longformer layers.

    Derives the local/global attention masks from ``attention_mask``
    (values < 0 are masked, values > 0 get global attention), runs every
    layer in sequence, optionally collects per-layer hidden states and
    attention probabilities, and strips any sequence padding (``padding_len``)
    from the returned tensors.

    Attributes:
        config: the encoder configuration.
        layer (nn.CellList): the stacked ``LongformerLayer`` modules.
    """
    def __init__(self, config):
        """Build the layer stack.

        Args:
            config: model configuration providing ``num_hidden_layers`` and
                the per-layer settings forwarded to ``LongformerLayer``.
        """
        super().__init__()
        self.config = config
        self.layer = nn.CellList([LongformerLayer(config, layer_id=i) for i in range(config.num_hidden_layers)])

    def construct(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        padding_len=0,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        """Run the input through every encoder layer.

        Args:
            hidden_states: input hidden states of the (possibly padded) sequence.
            attention_mask: per-position mask; entries < 0 mark masked positions
                and entries > 0 mark global-attention positions. Defaults to None.
            head_mask: optional per-layer head mask; its first dimension must
                equal the number of layers. Defaults to None.
            padding_len (int): number of padded positions to strip from the end
                of the sequence dimension in the outputs. Defaults to 0.
            output_attentions (bool): whether to collect attention tensors. Defaults to False.
            output_hidden_states (bool): whether to collect per-layer hidden states. Defaults to False.
            return_dict (bool): whether to return a ``LongformerBaseModelOutput``
                instead of a plain tuple. Defaults to True.

        Returns:
            ``LongformerBaseModelOutput`` (or a tuple of its non-None fields when
            ``return_dict`` is False) with the last hidden state and, if requested,
            all hidden states, local attentions and global attentions.

        Raises:
            AssertionError: if ``head_mask`` is given but its first dimension
                does not match the number of layers.
        """
        is_index_masked = attention_mask < 0
        is_index_global_attn = attention_mask > 0

        # Record `is_global_attn == True` to enable ONNX export
        is_global_attn = is_index_global_attn.flatten().any().item()

        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None  # All local attentions.
        all_global_attentions = () if (output_attentions and is_global_attn) else None

        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            assert head_mask.shape[0] == (
                len(self.layer)
            ), f"The head_mask should be specified for {len(self.layer)} layers, but it is for {head_mask.shape[0]}."

        for layer_idx, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = layer_module(
                hidden_states,
                attention_mask=attention_mask,
                layer_head_mask=None if head_mask is None else head_mask[layer_idx],
                is_index_masked=is_index_masked,
                is_index_global_attn=is_index_global_attn,
                is_global_attn=is_global_attn,
                output_attentions=output_attentions,
            )
            hidden_states = layer_outputs[0]

            if output_attentions:
                # bzs x seq_len x num_attn_heads x (num_global_attn + attention_window_len + 1)
                #   => bzs x num_attn_heads x seq_len x (num_global_attn + attention_window_len + 1)
                all_attentions += (layer_outputs[1].swapaxes(1, 2),)

                if is_global_attn:
                    # bzs x num_attn_heads x num_global_attn x seq_len
                    #   => bzs x num_attn_heads x seq_len x num_global_attn
                    all_global_attentions += (layer_outputs[2].swapaxes(2, 3),)

        # Add the final layer's hidden state.
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        # Undo padding: callers expect the original (unpadded) sequence length.
        hidden_states = hidden_states[:, : hidden_states.shape[1] - padding_len]
        if output_hidden_states:
            all_hidden_states = tuple(state[:, : state.shape[1] - padding_len] for state in all_hidden_states)

        if output_attentions:
            all_attentions = tuple(state[:, :, : state.shape[2] - padding_len, :] for state in all_attentions)

        if not return_dict:
            return tuple(
                v for v in [hidden_states, all_hidden_states, all_attentions, all_global_attentions] if v is not None
            )
        return LongformerBaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_attentions,
            global_attentions=all_global_attentions,
        )


# Copied from transformers.models.bert.modeling_bert.BertPooler
class LongformerPooler(nn.Cell):
    """Pools a sequence of hidden states into a single vector.

    BERT-style pooler: the hidden state of the first token (the <s>/[CLS]
    token) is passed through a dense projection followed by a tanh
    non-linearity.
    """

    def __init__(self, config):
        """Create the dense projection and tanh activation.

        Args:
            config: Model configuration providing ``hidden_size``.
        """
        super().__init__()
        self.dense = nn.Dense(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def construct(self, hidden_states: mindspore.Tensor) -> mindspore.Tensor:
        """Return the pooled representation.

        Args:
            hidden_states: Tensor of shape (batch_size, seq_len, hidden_size).

        Returns:
            Tensor of shape (batch_size, hidden_size): tanh(dense(first token)).
        """
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))


# Copied from transformers.models.roberta.modeling_roberta.RobertaLMHead with Roberta->Longformer
class LongformerLMHead(nn.Cell):
    """Longformer Head for masked language modeling."""

    def __init__(self, config):
        """Build the MLM head layers.

        Args:
            config: Model configuration providing ``hidden_size``,
                ``vocab_size`` and ``layer_norm_eps``.
        """
        super().__init__()
        self.dense = nn.Dense(config.hidden_size, config.hidden_size)
        self.layer_norm = nn.LayerNorm([config.hidden_size], epsilon=config.layer_norm_eps)

        self.decoder = nn.Dense(config.hidden_size, config.vocab_size)
        self.bias = Parameter(ops.zeros(config.vocab_size), 'bias')
        # Share one bias parameter between the head and the decoder projection.
        self.decoder.bias = self.bias

    def construct(self, features, **kwargs):
        """Project hidden states to vocabulary logits.

        Args:
            features: Hidden states of shape (batch_size, seq_len, hidden_size).

        Returns:
            Logits of shape (batch_size, seq_len, vocab_size).
        """
        hidden = self.layer_norm(gelu(self.dense(features)))
        # project back to size of vocabulary with bias
        return self.decoder(hidden)

    def _tie_weights(self):
        """Re-link ``self.bias`` to the decoder's bias parameter.

        Keeps the two references in sync if the decoder weights were replaced
        (e.g. after resizing or re-tying the output embeddings).
        """
        self.bias = self.decoder.bias


class LongformerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """
    config_class = LongformerConfig
    base_model_prefix = "longformer"
    _no_split_modules = ["LongformerSelfAttention"]

    def _init_weights(self, cell):
        """Initialize the weights of a single cell.

        Dense layers get Normal(0, initializer_range) weights and zero biases,
        embeddings get normal weights with the padding row zeroed, and
        LayerNorm gets ones/zeros for weight/bias.
        """
        if isinstance(cell, nn.Dense):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            cell.weight.set_data(initializer(Normal(self.config.initializer_range),
                                                    cell.weight.shape, cell.weight.dtype))
            if cell.has_bias:
                cell.bias.set_data(initializer('zeros', cell.bias.shape, cell.bias.dtype))
        elif isinstance(cell, nn.Embedding):
            weight = np.random.normal(0.0, self.config.initializer_range, cell.weight.shape)
            # Must compare against None, not truthiness: padding_idx == 0 is a
            # valid (and common) index whose embedding row must also be zeroed.
            if cell.padding_idx is not None:
                weight[cell.padding_idx] = 0

            cell.weight.set_data(Tensor(weight, cell.weight.dtype))
        elif isinstance(cell, nn.LayerNorm):
            cell.weight.set_data(initializer('ones', cell.weight.shape, cell.weight.dtype))
            cell.bias.set_data(initializer('zeros', cell.bias.shape, cell.bias.dtype))


class LongformerModel(LongformerPreTrainedModel):
    """
    This class copied code from [`RobertaModel`] and overwrote standard self-attention with longformer self-attention
    to provide the ability to process long sequences following the self-attention approach described in [Longformer:
    the Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, and Arman Cohan.
    Longformer self-attention combines a local (sliding window) and global attention to extend to long documents
    without the O(n^2) increase in memory and compute.

    The self-attention module `LongformerSelfAttention` implemented here supports the combination of local and global
    attention but it lacks support for autoregressive attention and dilated attention. Autoregressive and dilated
    attention are more relevant for autoregressive language modeling than finetuning on downstream tasks. Future
    release will add support for autoregressive attention, but the support for dilated attention requires a custom CUDA
    kernel to be memory and compute efficient.

    """
    def __init__(self, config, add_pooling_layer=True):
        """
        Initialize the Longformer encoder stack.

        Args:
            config (LongformerConfig): Model configuration. ``config.attention_window`` may be a single
                even, positive int (then replicated, one value per hidden layer) or a sequence with one
                value per hidden layer.
            add_pooling_layer (bool): Whether to append a `LongformerPooler` on top of the encoder.
                Defaults to True.

        Raises:
            AssertionError: If ``attention_window`` is an odd or non-positive int, or a sequence whose
                length differs from ``config.num_hidden_layers``.
        """
        super().__init__(config)
        self.config = config

        if isinstance(config.attention_window, int):
            assert config.attention_window % 2 == 0, "`config.attention_window` has to be an even value"
            assert config.attention_window > 0, "`config.attention_window` has to be positive"
            config.attention_window = [config.attention_window] * config.num_hidden_layers  # one value per layer
        else:
            assert len(config.attention_window) == config.num_hidden_layers, (
                "`len(config.attention_window)` should equal `config.num_hidden_layers`. "
                f"Expected {config.num_hidden_layers}, given {len(config.attention_window)}"
            )

        self.embeddings = LongformerEmbeddings(config)
        self.encoder = LongformerEncoder(config)
        self.pooler = LongformerPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        """Return the word-embedding layer used as input embeddings."""
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        """Replace the word-embedding layer used as input embeddings."""
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def _pad_to_window_size(
        self,
        input_ids: mindspore.Tensor,
        attention_mask: mindspore.Tensor,
        token_type_ids: mindspore.Tensor,
        position_ids: mindspore.Tensor,
        inputs_embeds: mindspore.Tensor,
        pad_token_id: int,
    ):
        """A helper function to pad tokens and mask to work with implementation of Longformer self-attention."""
        # padding
        attention_window = (
            self.config.attention_window
            if isinstance(self.config.attention_window, int)
            else max(self.config.attention_window)
        )

        assert attention_window % 2 == 0, f"`attention_window` should be an even value. Given {attention_window}"
        input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape
        batch_size, seq_len = input_shape[:2]

        # Number of tokens needed so that seq_len becomes a multiple of the attention window.
        padding_len = (attention_window - seq_len % attention_window) % attention_window

        # this path should be recorded in the ONNX export, it is fine with padding_len == 0 as well
        if padding_len > 0:
            logger.info(
                f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of "
                f"`config.attention_window`: {attention_window}"
            )
            if input_ids is not None:
                input_ids = ops.pad(input_ids, (0, padding_len), value=pad_token_id)
            if position_ids is not None:
                # pad with position_id = pad_token_id as in modeling_roberta.RobertaEmbeddings
                position_ids = ops.pad(position_ids, (0, padding_len), value=pad_token_id)
            if inputs_embeds is not None:
                # When embeddings are given directly, embed a batch of pad tokens and
                # append it along the sequence dimension.
                input_ids_padding = ops.full(
                    (batch_size, padding_len),
                    self.config.pad_token_id,
                    dtype=mindspore.int64,
                )
                inputs_embeds_padding = self.embeddings(input_ids_padding)
                inputs_embeds = ops.cat([inputs_embeds, inputs_embeds_padding], axis=-2)

            attention_mask = ops.pad(
                attention_mask, (0, padding_len), value=0
            )  # no attention on the padding tokens
            token_type_ids = ops.pad(token_type_ids, (0, padding_len), value=0)  # pad with token_type_id = 0

        return padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds

    def _merge_to_attention_mask(self, attention_mask: mindspore.Tensor, global_attention_mask: mindspore.Tensor):
        """
        Merge ``attention_mask`` and ``global_attention_mask`` into a single mask.

        Args:
            attention_mask (mindspore.Tensor): Binary mask of shape (batch_size, seq_len),
                0 = masked, 1 = attended. May be None, in which case only the global mask is used.
            global_attention_mask (mindspore.Tensor): Binary mask of shape (batch_size, seq_len),
                1 marking tokens that should receive global attention.

        Returns:
            mindspore.Tensor: Mask of shape (batch_size, seq_len) with values
            0 (no attention), 1 (local attention) or 2 (global attention).
        """
        # longformer self attention expects attention mask to have 0 (no attn), 1 (local attn), 2 (global attn)
        # (global_attention_mask + 1) => 1 for local attention, 2 for global attention
        # => final attention_mask => 0 for no attention, 1 for local attention 2 for global attention
        if attention_mask is not None:
            attention_mask = attention_mask * (global_attention_mask + 1)
        else:
            # simply use `global_attention_mask` as `attention_mask`
            # if no `attention_mask` is given
            attention_mask = global_attention_mask + 1
        return attention_mask

    def construct(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        global_attention_mask: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        position_ids: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, LongformerBaseModelOutputWithPooling]:
        r"""

        Returns:
            `LongformerBaseModelOutputWithPooling` when `return_dict=True`, otherwise a tuple of
            `(sequence_output, pooled_output, *encoder_outputs)`.

        Examples:

        ```python

        >>> from transformers import LongformerModel, AutoTokenizer

        >>> model = LongformerModel.from_pretrained("allenai/longformer-base-4096")
        >>> tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096")

        >>> SAMPLE_TEXT = " ".join(["Hello world! "] * 1000)  # long input document
        >>> input_ids = mindspore.Tensor(tokenizer.encode(SAMPLE_TEXT)).unsqueeze(0)  # batch of size 1

        >>> attention_mask = ops.ones(
        ...     input_ids.shape, dtype=mindspore.int64
        ... )  # initialize to local attention
        >>> global_attention_mask = ops.zeros(
        ...     input_ids.shape, dtype=mindspore.int64
        ... )  # initialize to global attention to be deactivated for all tokens
        >>> global_attention_mask[
        ...     :,
        ...     [
        ...         1,
        ...         4,
        ...         21,
        ...     ],
        ... ] = 1  # Set global attention to random tokens for the sake of this example
        >>> # Usually, set global attention based on the task. For example,
        >>> # classification: the <s> token
        >>> # QA: question tokens
        >>> # LM: potentially on the beginning of sentences and paragraphs
        >>> outputs = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask)
        >>> sequence_output = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        if input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.shape
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.shape[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if attention_mask is None:
            attention_mask = ops.ones(input_shape)
        if token_type_ids is None:
            token_type_ids = ops.zeros(input_shape, dtype=mindspore.int64)

        # merge `global_attention_mask` and `attention_mask`
        if global_attention_mask is not None:
            attention_mask = self._merge_to_attention_mask(attention_mask, global_attention_mask)

        # Pad all inputs so the sequence length is a multiple of the (maximum)
        # attention window; the encoder undoes this padding on its outputs.
        padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds = self._pad_to_window_size(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            pad_token_id=self.config.pad_token_id,
        )

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        # The 4-D mask from the base class is sliced back down to [batch_size, seq_len],
        # the shape Longformer self-attention works with.
        extended_attention_mask: mindspore.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)[
            :, 0, 0, :
        ]

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )

        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            padding_len=padding_len,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return LongformerBaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            global_attentions=encoder_outputs.global_attentions,
        )


class LongformerForMaskedLM(LongformerPreTrainedModel):
    """Longformer with a masked-language-modeling head on top.

    Runs the base `LongformerModel` (without pooling layer) and projects the
    sequence output to vocabulary logits through `LongformerLMHead`. When
    ``labels`` are given, a cross-entropy MLM loss is computed (positions
    labeled ``-100`` are ignored).
    """

    _tied_weights_keys = ["lm_head.decoder"]

    def __init__(self, config):
        """Initialize the base encoder and the LM head.

        Args:
            config: A `LongformerConfig` instance.
        """
        super().__init__(config)

        self.longformer = LongformerModel(config, add_pooling_layer=False)
        self.lm_head = LongformerLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        """Return the decoder projection serving as output embeddings."""
        return self.lm_head.decoder

    def set_output_embeddings(self, new_embeddings):
        """Replace the decoder projection serving as output embeddings."""
        self.lm_head.decoder = new_embeddings

    def construct(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        global_attention_mask: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        position_ids: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        labels: Optional[mindspore.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, LongformerMaskedLMOutput]:
        r"""
        labels (`mindspore.int64Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:
            `LongformerMaskedLMOutput` when `return_dict=True`, otherwise a tuple whose leading elements
            are the (optional) masked-LM loss and the prediction logits.
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        encoder_outputs = self.longformer(
            input_ids,
            attention_mask=attention_mask,
            global_attention_mask=global_attention_mask,
            head_mask=head_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        prediction_scores = self.lm_head(encoder_outputs[0])

        masked_lm_loss = None
        if labels is not None:
            # Flatten batch and sequence dimensions for token-level cross entropy.
            flat_scores = prediction_scores.view(-1, self.config.vocab_size)
            masked_lm_loss = ops.cross_entropy(flat_scores, labels.view(-1))

        if return_dict:
            return LongformerMaskedLMOutput(
                loss=masked_lm_loss,
                logits=prediction_scores,
                hidden_states=encoder_outputs.hidden_states,
                attentions=encoder_outputs.attentions,
                global_attentions=encoder_outputs.global_attentions,
            )

        output = (prediction_scores,) + encoder_outputs[2:]
        if masked_lm_loss is None:
            return output
        return (masked_lm_loss,) + output


class LongformerForSequenceClassification(LongformerPreTrainedModel):
    """Longformer with a sequence-classification/regression head on top.

    The base encoder (without pooling layer) is run with global attention on
    the CLS token by default, and the sequence output is fed through
    `LongformerClassificationHead`. Depending on ``config.problem_type``
    (inferred from ``num_labels`` and the label dtype when unset), an MSE,
    cross-entropy or BCE-with-logits loss is computed.
    """

    def __init__(self, config):
        """Set up the encoder and the classification head.

        Args:
            config: A `LongformerConfig` providing, among others, ``num_labels``.
        """
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.longformer = LongformerModel(config, add_pooling_layer=False)
        self.classifier = LongformerClassificationHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def _classification_loss(self, logits, labels):
        """Return the loss matching ``config.problem_type``, inferring it once if unset."""
        if self.config.problem_type is None:
            # Derive the task kind from the label count and dtype.
            if self.num_labels == 1:
                self.config.problem_type = "regression"
            elif self.num_labels > 1 and labels.dtype in (mindspore.int64, mindspore.int32):
                self.config.problem_type = "single_label_classification"
            else:
                self.config.problem_type = "multi_label_classification"

        if self.config.problem_type == "regression":
            if self.num_labels == 1:
                return ops.mse_loss(logits.squeeze(), labels.squeeze())
            return ops.mse_loss(logits, labels)
        if self.config.problem_type == "single_label_classification":
            return ops.cross_entropy(logits.view(-1, self.num_labels), labels.view(-1))
        if self.config.problem_type == "multi_label_classification":
            return ops.binary_cross_entropy_with_logits(logits, labels)
        # Unknown preset problem_type: no loss, matching the original behavior.
        return None

    def construct(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        global_attention_mask: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        position_ids: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        labels: Optional[mindspore.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, LongformerSequenceClassifierOutput]:
        r"""
        labels (`mindspore.int64Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        if global_attention_mask is None:
            logger.info("Initializing global attention on CLS token...")
            global_attention_mask = ops.zeros_like(input_ids)
            # global attention on cls token
            global_attention_mask[:, 0] = 1

        encoder_outputs = self.longformer(
            input_ids,
            attention_mask=attention_mask,
            global_attention_mask=global_attention_mask,
            head_mask=head_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = self.classifier(encoder_outputs[0])

        loss = self._classification_loss(logits, labels) if labels is not None else None

        if return_dict:
            return LongformerSequenceClassifierOutput(
                loss=loss,
                logits=logits,
                hidden_states=encoder_outputs.hidden_states,
                attentions=encoder_outputs.attentions,
                global_attentions=encoder_outputs.global_attentions,
            )

        output = (logits,) + encoder_outputs[2:]
        if loss is None:
            return output
        return (loss,) + output


class LongformerClassificationHead(nn.Cell):
    """Head for sentence-level classification tasks."""

    def __init__(self, config):
        """Build the classification head.

        Args:
            config: Model configuration object. Attributes used:
                - hidden_size (int): width of the intermediate dense layer.
                - hidden_dropout_prob (float): dropout probability applied
                  before each dense projection.
                - num_labels (int): number of output classes.
        """
        super().__init__()
        width = config.hidden_size
        self.dense = nn.Dense(width, width)
        self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
        self.out_proj = nn.Dense(width, config.num_labels)

    def construct(self, hidden_states, **kwargs):
        """Map the sequence representation to per-label scores.

        Args:
            hidden_states: Tensor of shape (batch_size, sequence_length, hidden_size)
                produced by the Longformer encoder.

        Returns:
            Tensor of shape (batch_size, num_labels) with the classification logits.
        """
        # Use only the first token's state (<s>, equivalent to [CLS]).
        cls_state = hidden_states[:, 0, :]
        cls_state = self.dropout(cls_state)
        cls_state = self.dense(cls_state)
        cls_state = ops.tanh(cls_state)
        cls_state = self.dropout(cls_state)
        return self.out_proj(cls_state)


class LongformerForQuestionAnswering(LongformerPreTrainedModel):
    """Longformer model with a span-classification head for extractive
    question answering.

    The model is a `LongformerModel` (without pooling layer) followed by a
    single `nn.Dense` projection that maps every token representation to two
    scores: one for being the start and one for being the end of the answer
    span.

    If `global_attention_mask` is not supplied and `input_ids` is available,
    global attention is placed automatically on the question tokens (the
    tokens before the first `sep_token_id`); otherwise a warning is logged.

    When `start_positions` and `end_positions` are given, the token
    classification loss is computed as the mean of the start and end
    cross-entropy losses, with out-of-sequence positions clamped and ignored.
    """

    def __init__(self, config):
        """Create the Longformer question-answering model.

        Args:
            config: Model configuration. Attributes used:
                - num_labels (int): number of span labels (start/end).
                - hidden_size (int): dimension of encoder hidden states.
                - sep_token_id (int): separator token id used to find the
                  question/passage boundary for automatic global attention.
        """
        super().__init__(config)
        self.num_labels = config.num_labels

        self.longformer = LongformerModel(config, add_pooling_layer=False)
        self.qa_outputs = nn.Dense(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def construct(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        global_attention_mask: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        position_ids: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        start_positions: Optional[mindspore.Tensor] = None,
        end_positions: Optional[mindspore.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, LongformerQuestionAnsweringModelOutput]:
        r"""
        start_positions (`mindspore.Tensor` of dtype int64 and shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`mindspore.Tensor` of dtype int64 and shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.

        Returns:

        Examples:

        ```python
        >>> from mindspore import ops
        >>> from transformers import AutoTokenizer
        >>> from mindnlp.transformers import LongformerForQuestionAnswering


        >>> tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-large-4096-finetuned-triviaqa")
        >>> model = LongformerForQuestionAnswering.from_pretrained("allenai/longformer-large-4096-finetuned-triviaqa")

        >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
        >>> encoding = tokenizer(question, text, return_tensors="ms")
        >>> input_ids = encoding["input_ids"]

        >>> # default is local attention everywhere
        >>> # the forward method will automatically set global attention on question tokens
        >>> attention_mask = encoding["attention_mask"]

        >>> outputs = model(input_ids, attention_mask=attention_mask)
        >>> start_logits = outputs.start_logits
        >>> end_logits = outputs.end_logits
        >>> all_tokens = tokenizer.convert_ids_to_tokens(input_ids[0].tolist())

        >>> answer_tokens = all_tokens[int(ops.argmax(start_logits)) : int(ops.argmax(end_logits)) + 1]
        >>> answer = tokenizer.decode(
        ...     tokenizer.convert_tokens_to_ids(answer_tokens)
        ... )  # remove space prepending space token
        ```"""
        if return_dict is None:
            return_dict = self.config.use_return_dict

        if global_attention_mask is None:
            if input_ids is None:
                logger.warning(
                    "It is not possible to automatically generate the `global_attention_mask` because input_ids is"
                    " None. Please make sure that it is correctly set."
                )
            else:
                # set global attention on question tokens automatically
                global_attention_mask = _compute_global_attention_mask(input_ids, self.config.sep_token_id)

        outputs = self.longformer(
            input_ids,
            attention_mask=attention_mask,
            global_attention_mask=global_attention_mask,
            head_mask=head_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        # Project every token to a (start, end) score pair and split them.
        span_logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = span_logits.split(1, axis=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.shape) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.shape) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.shape[1]
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            start_loss = ops.cross_entropy(start_logits, start_positions, ignore_index=ignored_index)
            end_loss = ops.cross_entropy(end_logits, end_positions, ignore_index=ignored_index)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            tail = (start_logits, end_logits) + outputs[2:]
            if total_loss is not None:
                return (total_loss,) + tail
            return tail

        return LongformerQuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            global_attentions=outputs.global_attentions,
        )


class LongformerForTokenClassification(LongformerPreTrainedModel):
    """Longformer model with a token-classification head on top.

    The model is a `LongformerModel` (without pooling layer) whose per-token
    hidden states are passed through dropout and a linear classifier to
    produce one logit vector per token. When `labels` are given, a
    cross-entropy loss over all tokens is computed.

    Labels should be indices in `[0, ..., config.num_labels - 1]`.
    """

    def __init__(self, config):
        """Create the Longformer token-classification model.

        Args:
            config: Model configuration. Attributes used:
                - num_labels (int): number of classification labels.
                - hidden_size (int): dimension of encoder hidden states.
                - hidden_dropout_prob (float): dropout probability applied to
                  the encoder output before classification.
        """
        super().__init__(config)
        self.num_labels = config.num_labels

        self.longformer = LongformerModel(config, add_pooling_layer=False)
        self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
        self.classifier = nn.Dense(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def construct(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        global_attention_mask: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        position_ids: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        labels: Optional[mindspore.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, LongformerTokenClassifierOutput]:
        r"""
        labels (`mindspore.Tensor` of dtype int64 and shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        encoder_outputs = self.longformer(
            input_ids,
            attention_mask=attention_mask,
            global_attention_mask=global_attention_mask,
            head_mask=head_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Per-token hidden states -> dropout -> per-token logits.
        token_states = self.dropout(encoder_outputs[0])
        logits = self.classifier(token_states)

        loss = None
        if labels is not None:
            # Flatten batch and sequence dims for the cross-entropy loss.
            loss = ops.cross_entropy(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            tail = (logits,) + encoder_outputs[2:]
            if loss is not None:
                return (loss,) + tail
            return tail

        return LongformerTokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            global_attentions=encoder_outputs.global_attentions,
        )


class LongformerForMultipleChoice(LongformerPreTrainedModel):
    """Longformer model with a multiple-choice classification head on top.

    Each example consists of `num_choices` candidate sequences (the second
    dimension of the input tensors). All choices are flattened into one batch,
    encoded by a pooled `LongformerModel`, and the pooled output of each
    choice is scored by a single-unit linear classifier; the scores are then
    reshaped back to `(batch_size, num_choices)`.

    If `global_attention_mask` is not supplied and `input_ids` is available,
    global attention is placed automatically on all tokens after
    `config.sep_token_id` for every choice.

    Attributes:
        longformer (LongformerModel): encoder (with pooling layer).
        dropout (nn.Dropout): dropout applied to the pooled output.
        classifier (nn.Dense): linear layer producing one score per choice.
    """

    def __init__(self, config):
        """Create the Longformer multiple-choice model.

        Args:
            config: Model configuration. Attributes used:
                - hidden_size (int): dimension of the pooled output.
                - hidden_dropout_prob (float): dropout probability.
                - sep_token_id (int): separator id for automatic global attention.
        """
        super().__init__(config)

        self.longformer = LongformerModel(config)
        self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
        self.classifier = nn.Dense(config.hidden_size, 1)

        # Initialize weights and apply final processing
        self.post_init()

    def construct(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        global_attention_mask: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        labels: Optional[mindspore.Tensor] = None,
        position_ids: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, LongformerMultipleChoiceModelOutput]:
        r"""
        labels (`mindspore.Tensor` of dtype int64 and shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        if input_ids is not None:
            num_choices = input_ids.shape[1]
        else:
            num_choices = inputs_embeds.shape[1]
        if return_dict is None:
            return_dict = self.config.use_return_dict

        # set global attention on question tokens
        if global_attention_mask is None and input_ids is not None:
            logger.info("Initializing global attention on multiple choice...")
            # put global attention on all tokens after `config.sep_token_id`
            per_choice_masks = [
                _compute_global_attention_mask(input_ids[:, i], self.config.sep_token_id, before_sep_token=False)
                for i in range(num_choices)
            ]
            global_attention_mask = ops.stack(per_choice_masks, axis=1)

        # Collapse (batch, num_choices, ...) into one flat batch dimension.
        flat_input_ids = None if input_ids is None else input_ids.view(-1, input_ids.shape[-1])
        flat_position_ids = None if position_ids is None else position_ids.view(-1, position_ids.shape[-1])
        flat_token_type_ids = None if token_type_ids is None else token_type_ids.view(-1, token_type_ids.shape[-1])
        flat_attention_mask = None if attention_mask is None else attention_mask.view(-1, attention_mask.shape[-1])
        flat_global_attention_mask = (
            None
            if global_attention_mask is None
            else global_attention_mask.view(-1, global_attention_mask.shape[-1])
        )
        flat_inputs_embeds = (
            None
            if inputs_embeds is None
            else inputs_embeds.view(-1, inputs_embeds.shape[-2], inputs_embeds.shape[-1])
        )

        outputs = self.longformer(
            flat_input_ids,
            position_ids=flat_position_ids,
            token_type_ids=flat_token_type_ids,
            attention_mask=flat_attention_mask,
            global_attention_mask=flat_global_attention_mask,
            head_mask=head_mask,
            inputs_embeds=flat_inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Score each flattened choice, then restore the choice dimension.
        pooled = self.dropout(outputs[1])
        choice_scores = self.classifier(pooled)
        reshaped_logits = choice_scores.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss = ops.cross_entropy(reshaped_logits, labels)

        if not return_dict:
            tail = (reshaped_logits,) + outputs[2:]
            if loss is not None:
                return (loss,) + tail
            return tail

        return LongformerMultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            global_attentions=outputs.global_attentions,
        )

# Names exported when this module is star-imported.
__all__ = [
    "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
    "LongformerForMaskedLM",
    "LongformerForMultipleChoice",
    "LongformerForQuestionAnswering",
    "LongformerForSequenceClassification",
    "LongformerForTokenClassification",
    "LongformerModel",
    "LongformerPreTrainedModel",
    "LongformerSelfAttention",
]
