import torch
from customized_bert import BertModel, BertEncoder
from transformers.modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions
from transformers.modeling_outputs import BaseModelOutputWithPastAndCrossAttentions
from transformers.utils import logging
import warnings

logger = logging.get_logger(__name__)

# Intermediate results of each pipeline stage, kept as module-level globals.
embedding_output, encoder_outputs, pooled_output = None, None, None
# Intermediate values of the forward pass, kept global to avoid recomputation.
input_shape, batch_size, seq_length, past_key_values_length, result_dict = None, None, None, None, None
pooler_output = None

class BertPreProcessOutput:
    """Container for everything produced by :func:`bert_preprocess`.

    Bundles the (possibly defaulted) forward() arguments together with the
    derived masks so the encoder stage can consume them as a single object.
    All constructor arguments are stored as same-named attributes verbatim.

    Note: a previous, commented-out ``to(device)`` helper was removed as dead
    code; re-add a working version if cross-device staging is needed.
    """

    def __init__(self, input_ids, position_ids, token_type_ids, inputs_embeds, past_key_values_length,
                    extended_attention_mask, head_mask, encoder_hidden_states, encoder_extended_attention_mask,
                        past_key_values, use_cache, output_attentions, output_hidden_states, return_dict):
        self.input_ids = input_ids
        self.position_ids = position_ids
        self.token_type_ids = token_type_ids
        self.inputs_embeds = inputs_embeds
        self.past_key_values_length = past_key_values_length
        self.extended_attention_mask = extended_attention_mask
        self.head_mask = head_mask
        self.encoder_hidden_states = encoder_hidden_states
        self.encoder_extended_attention_mask = encoder_extended_attention_mask
        self.past_key_values = past_key_values
        self.use_cache = use_cache
        self.output_attentions = output_attentions
        self.output_hidden_states = output_hidden_states
        self.return_dict = return_dict

class BertLayerData:
    """Accumulator for per-layer encoder state.

    Holds the running tuples of hidden states / attentions, the decoder
    cache, and the current hidden states flowing between layers.
    """

    def __init__(self, all_hidden_states, all_self_attentions, all_cross_attentions, next_decoder_cache, hidden_states=None):
        # The current activations; None until the embedding stage fills it in.
        self.hidden_states = hidden_states
        self.next_decoder_cache = next_decoder_cache
        self.all_hidden_states = all_hidden_states
        self.all_self_attentions = all_self_attentions
        self.all_cross_attentions = all_cross_attentions

def invert_attention_mask(encoder_attention_mask):
    """
    Invert an attention mask (e.g., switches 0. and 1.).

    Broadcasts a 2D ``[batch, seq]`` or 3D ``[batch, from_seq, to_seq]`` mask
    to 4D and maps attended positions (1.0) to 0.0 and masked positions (0.0)
    to the dtype's minimum, suitable for adding to raw attention scores.

    Args:
        encoder_attention_mask (`torch.Tensor`): An attention mask.

    Returns:
        `torch.Tensor`: The inverted attention mask.

    Raises:
        ValueError: if the mask is neither 2D nor 3D (the original code hit an
            UnboundLocalError in that case).
    """
    if encoder_attention_mask.dim() == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    elif encoder_attention_mask.dim() == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    else:
        raise ValueError(
            f"Wrong shape for encoder_attention_mask (shape {encoder_attention_mask.shape})"
        )
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=torch.float32)  # fp16 compatibility
    encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * torch.finfo(torch.float32).min

    return encoder_extended_attention_mask

def get_extended_attention_mask(attention_mask, input_shape, device=None, dtype=None):
    """
    Makes broadcastable attention masks so that masked tokens are ignored.

    Encoder-only variant of HF's ``ModuleUtilsMixin.get_extended_attention_mask``:
    the decoder/causal branch was already dead (guarded by ``if False``) and has
    been removed, along with the tautological ``not (dim == 2 and False)`` guard
    around the deprecation warning.

    Arguments:
        attention_mask (`torch.Tensor`):
            Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
        input_shape (`Tuple[int]`):
            The shape of the input to the model (used only in the error message).
        device: deprecated; passing it only triggers a FutureWarning.
        dtype: output dtype; defaults to ``torch.float32``.

    Returns:
        `torch.Tensor`: extended mask of shape ``[batch, 1, *, seq]`` with 0.0
        at attended positions and ``torch.finfo(dtype).min`` at masked ones.

    Raises:
        ValueError: if ``attention_mask`` is neither 2D nor 3D.
    """
    if dtype is None:
        dtype = torch.float32

    if device is not None:
        warnings.warn(
            "The `device` argument is deprecated and will be removed in v5 of Transformers.", FutureWarning
        )

    # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
    # ourselves in which case we just need to make it broadcastable to all heads.
    if attention_mask.dim() == 3:
        extended_attention_mask = attention_mask[:, None, :, :]
    elif attention_mask.dim() == 2:
        # Padding mask of dimensions [batch_size, seq_length]; broadcast to
        # [batch_size, num_heads, seq_length, seq_length].
        extended_attention_mask = attention_mask[:, None, None, :]
    else:
        raise ValueError(
            f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})"
        )

    # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
    # masked positions, this operation will create a tensor which is 0.0 for
    # positions we want to attend and the dtype's smallest value for masked positions.
    # Since we are adding it to the raw scores before the softmax, this is
    # effectively the same as removing these entirely.
    extended_attention_mask = extended_attention_mask.to(dtype=dtype)  # fp16 compatibility
    extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(dtype).min
    return extended_attention_mask

def get_head_mask(head_mask, num_hidden_layers, is_attention_chunked=False):
    """
    Prepare the head mask if needed.

    Args:
        head_mask (`torch.Tensor` with shape `[num_heads]` or `[num_hidden_layers x num_heads]`, *optional*):
            The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).
        num_hidden_layers (`int`):
            The number of hidden layers in the model.
        is_attention_chunked: (`bool`, *optional*, defaults to `False`):
            Unused here; kept for signature compatibility.

    Returns:
        The mask unchanged when one is supplied, otherwise a list of
        ``num_hidden_layers`` ``None`` entries (one per layer).
    """
    if head_mask is None:
        return [None] * num_hidden_layers
    return head_mask

def bert_preprocess(encoded_input, config, embeddings):
    """Pre-encoder stage of the BERT forward pass.

    Unpacks the usual ``BertModel.forward`` keyword arguments from the
    ``encoded_input`` dict, resolves unspecified flags from ``config``,
    validates the inputs, and builds the extended attention / head masks.

    Args:
        encoded_input: dict of forward() arguments; missing keys become None.
        config: model config; read for ``output_attentions``,
            ``output_hidden_states``, ``use_return_dict``, ``is_decoder``,
            ``use_cache`` and ``num_hidden_layers``.
        embeddings: embedding module; only its buffered ``token_type_ids``
            attribute (if present) is used here.

    Returns:
        BertPreProcessOutput bundling everything the encoder stage consumes.

    Raises:
        ValueError: if both or neither of ``input_ids`` / ``inputs_embeds``
            are provided.
    """
    input_ids = encoded_input.get("input_ids")
    attention_mask = encoded_input.get("attention_mask")
    token_type_ids = encoded_input.get("token_type_ids")
    position_ids = encoded_input.get("position_ids")
    head_mask = encoded_input.get("head_mask")
    inputs_embeds = encoded_input.get("inputs_embeds")
    encoder_hidden_states = encoded_input.get("encoder_hidden_states")
    encoder_attention_mask = encoded_input.get("encoder_attention_mask")
    past_key_values = encoded_input.get("past_key_values")
    use_cache = encoded_input.get("use_cache")
    output_attentions = encoded_input.get("output_attentions")
    output_hidden_states = encoded_input.get("output_hidden_states")
    return_dict = encoded_input.get("return_dict")

    # Fall back to config-level defaults for any output flag left as None.
    output_attentions = output_attentions if output_attentions is not None else config.output_attentions
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else config.output_hidden_states
    )
    return_dict = return_dict if return_dict is not None else config.use_return_dict

    # Caching is only meaningful for decoder-style configs.
    if config.is_decoder:
        use_cache = use_cache if use_cache is not None else config.use_cache
    else:
        use_cache = False

    if input_ids is not None and inputs_embeds is not None:
        raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
    elif input_ids is not None:
        input_shape = input_ids.size()
    elif inputs_embeds is not None:
        input_shape = inputs_embeds.size()[:-1]
    else:
        raise ValueError("You have to specify either input_ids or inputs_embeds")

    batch_size, seq_length = input_shape
    device = input_ids.device if input_ids is not None else inputs_embeds.device

    # past_key_values_length
    # Assumes cached key/value tensors carry the past sequence length in dim 2
    # (the HF BERT cache layout) — TODO confirm against customized_bert.
    past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

    if attention_mask is None:
        # Default mask: attend to every position, including cached past tokens.
        attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)

    if token_type_ids is None:
        if hasattr(embeddings, "token_type_ids"):
            # Reuse the embedding module's buffered token_type_ids, broadcast
            # to the current batch.
            buffered_token_type_ids = embeddings.token_type_ids[:, :seq_length]
            buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
            token_type_ids = buffered_token_type_ids_expanded
        else:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

    # bert_model = BertModel(config)
    # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
    # ourselves in which case we just need to make it broadcastable to all heads.
    extended_attention_mask: torch.Tensor = get_extended_attention_mask(attention_mask, input_shape, dtype=torch.float32)

    # If a 2D or 3D attention mask is provided for the cross-attention
    # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
    if config.is_decoder and encoder_hidden_states is not None:
        encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
        encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
        encoder_extended_attention_mask = invert_attention_mask(encoder_attention_mask)
    else:
        encoder_extended_attention_mask = None

    # Prepare head mask if needed
    # 1.0 in head_mask indicate we keep the head
    # attention_probs has shape bsz x n_heads x N x N
    # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
    # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
    head_mask = get_head_mask(head_mask, config.num_hidden_layers)

    return BertPreProcessOutput(input_ids, position_ids, token_type_ids, inputs_embeds, past_key_values_length,
                            extended_attention_mask, head_mask, encoder_hidden_states, encoder_extended_attention_mask,
                            past_key_values, use_cache, output_attentions, output_hidden_states, return_dict)

def encoder_preprocess(output_hidden_states, output_attentions, config, use_cache):
    """Initialize the per-layer accumulators consumed by the encoder loop.

    Each accumulator starts as an empty tuple when its collection is
    requested, otherwise None so downstream code can skip it.
    """
    hidden_acc = () if output_hidden_states else None
    self_attn_acc = () if output_attentions else None

    # Cross-attentions are only gathered when the model actually has them.
    cross_attn_acc = None
    if output_attentions and config.add_cross_attention:
        cross_attn_acc = ()

    # NOTE: the upstream gradient-checkpointing / use_cache interaction check
    # was removed here (it lived on the encoder module, not this function).
    cache_acc = () if use_cache else None

    return BertLayerData(hidden_acc, self_attn_acc, cross_attn_acc, cache_acc)

def bert_layer_forward_i(layer_module, idx, bert_layer_data, preprocess_output, config):
    """Run encoder layer ``idx`` and fold its outputs into the accumulators.

    Mirrors one iteration of the encoder's layer loop: optionally records the
    incoming hidden states, applies ``layer_module`` with the per-layer head
    mask / past key-values from ``preprocess_output``, then appends the
    layer's cache and attention outputs.

    NOTE(review): ``bert_layer_data`` is mutated in place; the returned object
    is a fresh BertLayerData built from those same updated fields, so callers
    may use either.
    """
    if preprocess_output.output_hidden_states:
        bert_layer_data.all_hidden_states = bert_layer_data.all_hidden_states + (bert_layer_data.hidden_states,)

    # Per-layer slices of the head mask and KV cache (both optional).
    layer_head_mask = preprocess_output.head_mask[idx] if preprocess_output.head_mask is not None else None
    past_key_value = preprocess_output.past_key_values[idx] if preprocess_output.past_key_values is not None else None

    layer_outputs = layer_module(
        bert_layer_data.hidden_states,
        preprocess_output.extended_attention_mask,
        layer_head_mask,
        preprocess_output.encoder_hidden_states,
        preprocess_output.encoder_extended_attention_mask,
        past_key_value,
        preprocess_output.output_attentions,
    )

    # layer_outputs layout (as indexed below): [0] new hidden states,
    # [1] self-attentions and [2] cross-attentions when requested,
    # [-1] the cache entry when use_cache is on.
    bert_layer_data.hidden_states = layer_outputs[0]
    if preprocess_output.use_cache:
        bert_layer_data.next_decoder_cache += (layer_outputs[-1],)
    if preprocess_output.output_attentions:
        bert_layer_data.all_self_attentions = bert_layer_data.all_self_attentions + (layer_outputs[1],)
        if config.add_cross_attention:
            bert_layer_data.all_cross_attentions = bert_layer_data.all_cross_attentions + (layer_outputs[2],)

    return BertLayerData(bert_layer_data.all_hidden_states, bert_layer_data.all_self_attentions,
                         bert_layer_data.all_cross_attentions, bert_layer_data.next_decoder_cache, bert_layer_data.hidden_states)

def get_encoder_outputs(preprocess_output, encoder_preprocess_output):
    """Finalize the encoder stage's accumulated state into its output form.

    Appends the final hidden states to the hidden-state history when
    requested, then packages everything either as a
    ``BaseModelOutputWithPastAndCrossAttentions`` (``return_dict``) or as a
    tuple with the None entries dropped.
    """
    state = encoder_preprocess_output

    if preprocess_output.output_hidden_states:
        state.all_hidden_states = state.all_hidden_states + (state.hidden_states,)

    if preprocess_output.return_dict:
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=state.hidden_states,
            past_key_values=state.next_decoder_cache,
            hidden_states=state.all_hidden_states,
            attentions=state.all_self_attentions,
            cross_attentions=state.all_cross_attentions,
        )

    candidates = (
        state.hidden_states,
        state.next_decoder_cache,
        state.all_hidden_states,
        state.all_self_attentions,
        state.all_cross_attentions,
    )
    return tuple(item for item in candidates if item is not None)

def bert_pooler_forward(pooler, encoder_outputs, return_dict):
    """Apply the optional pooler to the encoder output and package the result.

    Runs ``pooler`` over the last hidden state (skipped when ``pooler`` is
    None), then returns either the HF model-output object or the plain tuple
    form ``(sequence_output, pooled_output, *rest)``.
    """
    seq_out = encoder_outputs[0]

    if pooler is None:
        pooled = None
    else:
        pooled = pooler(seq_out)

    if return_dict:
        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=seq_out,
            pooler_output=pooled,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )

    return (seq_out, pooled) + encoder_outputs[1:]