import torch
import torch.nn as nn

import math
from transformers.activations import get_activation
from transformers import BertPreTrainedModel, BertModel, BertConfig
from transformers.models.bert.modeling_bert import BertLMPredictionHead, BertOnlyMLMHead


class BertOnlyMLM2Head(nn.Module):
    """Masked-language-model head: projects encoder hidden states to
    vocabulary logits and computes the MLM loss / accuracy."""

    def __init__(self, config):
        super().__init__()
        self.vocab_size = config.vocab_size
        self.token_mlm = BertLMPredictionHead(config)   # mask lm layer
        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, hidden_out):
        """Return vocabulary logits of shape (batch, seq, vocab_size)."""
        mlm_logits = self.token_mlm(hidden_out)
        return mlm_logits

    def loss_fc(self, mlm_logits, mlm_label_ids, is_training, device):
        """Compute MLM loss and accuracy.

        Args:
            mlm_logits: (batch, seq, vocab_size) logits from forward().
            mlm_label_ids: (batch, label_len) gold token ids, 0 = padding;
                may be None at inference.
            is_training: when False (or labels are None), zero tensors are
                returned instead of a real loss/accuracy.
            device: device used for the zero tensors in the no-label path.

        Returns:
            (mlm_loss, mlm_acc) tensors.
        """
        if mlm_label_ids is not None and is_training:
            # Labels may be shorter than the sequence; truncate logits to match.
            mlm_logits = mlm_logits[:, :mlm_label_ids.shape[1], :].contiguous()
            # Non-padding positions (label id 0 is padding).
            active_mask = mlm_label_ids.view(-1) != 0
            # NOTE(review): the loss is averaged over ALL positions, including
            # label-0 padding; only the accuracy skips padding — confirm intended.
            mlm_loss = self.loss_fn(mlm_logits.view(-1, self.vocab_size), mlm_label_ids.view(-1))

            pred_ids = torch.argmax(mlm_logits, dim=2).view(-1)[active_mask]
            gold_ids = mlm_label_ids.view(-1)[active_mask]
            mlm_acc = torch.div(torch.eq(pred_ids, gold_ids).sum(), active_mask.sum())
        else:
            # Inference / no labels: report zero loss and accuracy.
            # (Removed a dead argmax computation whose result was never used.)
            mlm_loss = torch.tensor([0], dtype=torch.float, device=device)
            mlm_acc = torch.tensor([0], dtype=torch.float, device=device)

        return mlm_loss, mlm_acc


class BertOnlyTokenClsHead(nn.Module):
    """Token-level edit-tag classifier: linear projection of encoder hidden
    states to per-token tag logits, plus loss/accuracy helpers."""

    def __init__(self, config):
        super().__init__()
        self.token_cls = nn.Linear(config.hidden_size, config.num_labels)
        self.loss_fn = nn.CrossEntropyLoss()
        self.num_labels = config.num_labels

    def forward(self, hidden_out):
        """Return tag logits of shape (batch, seq, num_labels)."""
        token_tag_logits = self.token_cls(hidden_out)
        return token_tag_logits

    def loss_fc(self, tag_logits, edit_tag_ids, attention_mask, is_training, device):
        """Compute tag loss and accuracy.

        Args:
            tag_logits: (batch, seq, num_labels) logits from forward().
            edit_tag_ids: (batch, seq) gold tag ids, or None at inference.
            attention_mask: (batch, seq) 1 = real token; when None, all
                positions participate in loss and accuracy.
            is_training: when False (or labels are None), zero tensors are
                returned instead of a real loss/accuracy.
            device: device used for the zero tensors in the no-label path.

        Returns:
            (tag_loss, tag_acc) tensors.
        """
        tag_act_idx = torch.argmax(tag_logits, dim=2)
        if edit_tag_ids is not None and is_training:
            flat_logits = tag_logits.view(-1, self.num_labels)
            flat_labels = edit_tag_ids.view(-1)
            flat_preds = tag_act_idx.view(-1)
            if attention_mask is not None:
                # Restrict loss/accuracy to non-padding positions.
                active = attention_mask.view(-1) == 1
                flat_logits = flat_logits[active]
                flat_labels = flat_labels[active]
                flat_preds = flat_preds[active]
                denominator = attention_mask.sum()
            else:
                # BUG FIX: the original maskless branch left active_labels /
                # tag_logits_idx undefined and then called attention_mask.sum()
                # on None, crashing; compute accuracy over all positions instead.
                denominator = flat_labels.numel()
            tag_loss = self.loss_fn(flat_logits, flat_labels)
            tag_acc = torch.div(torch.eq(flat_labels, flat_preds).sum(), denominator)
        else:
            tag_loss = torch.tensor([0], dtype=torch.float, device=device)
            tag_acc = torch.tensor([0], dtype=torch.float, device=device)

        return tag_loss, tag_acc
    
class BertOnlyTokenPtrHead(nn.Module):
    """Pointer head: combines encoder hidden states with edit-tag and
    position embeddings, refines them with a (weight-shared) transformer
    layer, and scores token-to-token pointing via query/key dot products."""

    def __init__(self, config):
        super().__init__()
        self.ptr_num_hidden_layers = config.ptr_num_hidden_layers
        # Tag embedding width: smallest integer >= sqrt(num_labels).
        o_emb = int(math.ceil(math.sqrt(config.num_labels)))

        self.tag_embedding_layer = nn.Embedding(config.num_labels, o_emb)
        self.pos_embedding_layer = nn.Embedding(config.max_position_embeddings, config.embedding_size)
        self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)

        self.edit_tagged_seqout_layer = nn.Linear(config.hidden_size + o_emb + config.embedding_size,
                                                  config.hidden_size)

        # One encoder layer applied ptr_num_hidden_layers times in forward()
        # (i.e. the stacked layers share weights).
        self.transformer_query_layer = nn.TransformerEncoderLayer(
            d_model=config.hidden_size,
            nhead=config.num_attention_heads,
            dim_feedforward=config.intermediate_size,
            activation=config.hidden_act,
            dropout=config.hidden_dropout_prob,
        )

        self.query_embeddings_layer = nn.Linear(config.hidden_size, config.query_size)
        self.key_embeddings_layer = nn.Linear(config.hidden_size, config.query_size)

        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, hidden_out, edit_tag_ids, attention_mask, input_shape, device):
        """Return pointing logits of shape (batch, seq, seq)."""
        tag_embedding = self.tag_embedding_layer(edit_tag_ids)
        # NOTE(review): position embeddings are indexed by edit_tag_ids rather
        # than by token positions — confirm this is intentional.
        position_embeddings = self.pos_embedding_layer(edit_tag_ids)
        embedding = torch.cat([hidden_out, tag_embedding, position_embeddings], dim=2)

        edit_tagged_seqout = self.edit_tagged_seqout_layer(embedding)
        edit_tagged_seqout_act = get_activation('gelu')(edit_tagged_seqout)
        intermediate_query_embeddings = edit_tagged_seqout_act

        # nn.TransformerEncoderLayer expects (seq, batch, hidden).
        intermediate_query_embeddings = intermediate_query_embeddings.transpose(1, 0)
        for _ in range(self.ptr_num_hidden_layers):
            intermediate_query_embeddings = self.transformer_query_layer(intermediate_query_embeddings)
        intermediate_query_embeddings = intermediate_query_embeddings.transpose(1, 0)

        query_embeddings = self.query_embeddings_layer(intermediate_query_embeddings)
        key_embeddings = self.key_embeddings_layer(edit_tagged_seqout_act)
        pointing_logits = self.attention_score(query_embeddings, key_embeddings, attention_mask, input_shape, device)

        return pointing_logits

    def loss_fc(self, pointing_logits, edit_pointing, attention_mask, is_training, input_shape, device):
        """Compute pointing loss and accuracy.

        Args:
            pointing_logits: (batch, seq, seq) scores from forward().
            edit_pointing: (batch, seq) gold pointing targets, or None.
            attention_mask: (batch, seq) 1 = real token; when None, all
                positions participate in loss and accuracy.
            is_training: when False (or labels are None), zero tensors are
                returned instead of a real loss/accuracy.
            input_shape: (batch, seq) shape of the input ids.
            device: device used for the zero tensors in the no-label path.

        Returns:
            (pointing_loss, pointing_acc) tensors.
        """
        pointing_act_idx = torch.argmax(pointing_logits, dim=2)
        if edit_pointing is not None and is_training:
            loss_fct = nn.CrossEntropyLoss()
            eq = torch.eq(edit_pointing, pointing_act_idx)
            if attention_mask is not None:
                # Only keep active (non-padding) parts of the loss.
                active_mask = attention_mask.view(-1) == 1
                active_logits = pointing_logits.view(-1, input_shape[1])[active_mask]
                active_labels = edit_pointing.view(-1)[active_mask]
                pointing_loss = loss_fct(active_logits, active_labels)
                numerator = (eq * attention_mask).sum()
                denominator = attention_mask.sum()
            else:
                # BUG FIX: the original computed accuracy with attention_mask
                # unconditionally and crashed here when it was None.
                pointing_loss = loss_fct(pointing_logits.view(-1, input_shape[1]), edit_pointing.view(-1))
                numerator = eq.sum()
                denominator = eq.numel()
            pointing_acc = torch.div(numerator, denominator)
        else:
            pointing_loss = torch.tensor([0], dtype=torch.float, device=device)
            pointing_acc = torch.tensor([0], dtype=torch.float, device=device)

        return pointing_loss, pointing_acc

    def att_mask(self, mask, input_shape):
        """Broadcast a (batch, seq) mask to a (batch, seq, seq) key mask."""
        mask = mask.reshape((input_shape[0], 1, input_shape[1]))
        broadcast_ones = torch.ones((input_shape[0], input_shape[1], 1), device=mask.device, dtype=mask.dtype)
        mask = (broadcast_ones * mask).to(dtype=mask.dtype)
        return mask

    def attention_score(self, query, key, attention_mask, input_shape, device):
        """Dot-product scores between queries and keys; padded keys and the
        diagonal (a token pointing to itself) are pushed to -1e9."""
        attention_scores = torch.matmul(query, key.transpose(1, 2))
        if attention_mask is not None:
            ext_attention_mask = self.att_mask(attention_mask, input_shape)

            # Zero on the diagonal so a token cannot point to itself.
            diagonal_mask = 1 - torch.diag_embed(
                torch.ones((input_shape[0], input_shape[1]), device=device, dtype=attention_mask.dtype))
            mask = torch.multiply(diagonal_mask, ext_attention_mask)
            mask_add = -1e9 * (1. - mask)
            attention_scores = attention_scores * mask + mask_add

        return attention_scores
    
class MateModel(BertPreTrainedModel):
    """BERT encoder with three parallel token-level heads — edit-tag
    classification, token pointing, and masked LM — whose losses are
    combined into one weighted total loss."""

    def __init__(self, config):
        super().__init__(config)
        # Token-level model: the pooled [CLS] output is not used.
        self.bert = BertModel(config, add_pooling_layer=False)
        
        # Per-head loss weights, in order: [tag, pointing, mlm].
        self.point_loss_weight = [1.0, 1.0, 1.0]
        
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        self.token_mlm = BertOnlyMLM2Head(config)   
        
        self.token_cls = BertOnlyTokenClsHead(config)

        self.token_ptr = BertOnlyTokenPtrHead(config)
        
        
        # transformers hook: runs weight initialization / final processing.
        self.post_init()

    def forward(
            self,
            input_ids=None,
            attention_mask=None,
            token_type_ids=None,
            position_ids=None,
            head_mask=None,
            inputs_embeds=None,
            edit_tag_ids=None,
            edit_pointing=None,
            mlm_label_ids=None,
            output_attentions=None,
            output_hidden_states=None,
            return_dict=None,
            is_training=True,
    ):
        """Encode the input and run the three heads.

        Returns a 10-tuple:
            (total_loss,
             mlm_act_idx, token_mlm_loss, token_mlm_acc,
             token_tag_act_idx, token_tag_loss, token_tag_acc,
             token_pointing_act_idx, token_pointing_loss, token_pointing_acc)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        
        # NOTE(review): input_ids.size() is required by the pointer head below,
        # so calling with only inputs_embeds would fail — confirm callers
        # always pass input_ids.
        discriminator_hidden_states = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Last hidden state, with dropout applied for the tag/pointer heads
        # (the MLM head below consumes the un-dropped hidden states).
        transformer_last_hiddenout_d = self.dropout(discriminator_hidden_states[0])
        
        token_tag_logits = self.token_cls.forward(transformer_last_hiddenout_d)
        token_tag_loss, token_tag_acc = self.token_cls.loss_fc(token_tag_logits, edit_tag_ids, attention_mask, is_training, device)
        token_tag_act_idx = torch.argmax(token_tag_logits, dim=2)
        # At inference no gold tags exist: feed predicted tags to the pointer head.
        if edit_tag_ids is None: 
            edit_tag_ids = token_tag_act_idx

        token_pointing_logits = self.token_ptr.forward(transformer_last_hiddenout_d, edit_tag_ids, attention_mask, input_ids.size(), device)
        token_pointing_loss, token_pointing_acc = self.token_ptr.loss_fc(token_pointing_logits, edit_pointing, attention_mask, is_training, input_ids.size(), device)
        token_pointing_act_idx = torch.argmax(token_pointing_logits, dim=2)
        
        token_mlm_logits = self.token_mlm.forward(discriminator_hidden_states[0])
        token_mlm_loss, token_mlm_acc = self.token_mlm.loss_fc(token_mlm_logits, mlm_label_ids, is_training, device)
        mlm_act_idx = torch.argmax(token_mlm_logits, dim=2)

        # Weighted sum of the three head losses (weights set in __init__).
        loss = token_tag_loss * self.point_loss_weight[0] + \
                  token_pointing_loss * self.point_loss_weight[1] + \
                    token_mlm_loss * self.point_loss_weight[2]

        # tag_acc, point_acc
        return loss, mlm_act_idx, token_mlm_loss, token_mlm_acc, \
                    token_tag_act_idx, token_tag_loss, token_tag_acc, \
                        token_pointing_act_idx, token_pointing_loss, token_pointing_acc

    