from transformers import BertForMaskedLM, BertForSequenceClassification, BertModel, BertConfig
from transformers.models.bert.modeling_bert import BertOnlyMLMHead
from torch.nn import Linear, CrossEntropyLoss
from torch.nn import functional as F
from torch import nn
import torch


# Mean-pool token embeddings while excluding padding positions.
def get_unpad_vec(token_embeddings, attention_mask):
    """Mean-pool token embeddings over the sequence, ignoring padding.

    Args:
        token_embeddings: float tensor of shape [B, L, H].
        attention_mask: tensor of shape [B, L]; 1 for real tokens, 0 for padding.

    Returns:
        Tensor of shape [B, H]: per-sample mean of the non-padding embeddings.
    """
    # [B, L] -> [B, L, 1] -> [B, L, H]; entries are 0.0 or 1.0
    mask = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    # Element-wise multiply zeroes out padding positions, then sum over the
    # sequence dimension -> [B, H]
    sum_embeddings = (token_embeddings * mask).sum(1)
    # Count of real tokens per sample, broadcast over H -> [B, H];
    # clamp keeps the denominator non-zero for an all-padding row
    denom = mask.sum(1).clamp(min=1e-9)
    # NOTE: the original appended the quotient to a one-element list and
    # torch.cat'ed it back together — a no-op wrapper that is dropped here.
    return sum_embeddings / denom


# Extract the embedding of the currently masked token; every input sample must contain exactly one [MASK].
# Note: the caller must ensure token_embeddings has no element equal to 0.
def get_mask_vec(token_embeddings, input_ids, mask_token_id=103):
    """Extract the embedding at the [MASK] position of each sample.

    Assumes every sample contains exactly ONE mask token; with zero or
    multiple masks per row the sample/row alignment silently breaks.

    Args:
        token_embeddings: float tensor of shape [B, L, H].
        input_ids: int tensor of shape [B, L].
        mask_token_id: token id to look for (103 is bert-base-uncased's [MASK];
            now a parameter instead of a hard-coded literal).

    Returns:
        Tensor of shape [B, H] holding each sample's mask-position embedding.
    """
    # Column index of the (single) mask in each row. nonzero() returns matches
    # in row-major order, so entry i belongs to sample i when there is exactly
    # one mask per sample.
    mask_cols = torch.nonzero(input_ids == mask_token_id, as_tuple=False)[:, 1]
    batch_idx = torch.arange(token_embeddings.size(0), device=token_embeddings.device)
    # Vectorized advanced-index gather replaces the original per-sample
    # Python loop + unsqueeze + cat; the result is identical.
    return token_embeddings[batch_idx, mask_cols]


class Bin(nn.Module):
    """Binary classifier on top of BERT with a configurable sentence vector.

    conf.MODE selects how the sentence representation is built:
      - "pooling": BERT's pooler output.
      - "cls":     the [CLS] token's last hidden state.
      - "mask":    the [MASK] token's last hidden state (one mask per sample).
    """

    def __init__(self, conf):
        super().__init__()
        self.bert = BertModel.from_pretrained("bert-base-uncased")
        self.linear = Linear(768, 2)
        self.mode = conf.MODE

    def forward(self, x):
        # Single encoder pass; the original ran BERT a second time in
        # "mask" mode even though last_hidden_state was already available.
        out = self.bert(**x)
        if self.mode == "pooling":
            features = out.pooler_output
        elif self.mode == "cls":
            features = out.last_hidden_state[:, 0, :]
        elif self.mode == "mask":
            features = get_mask_vec(out.last_hidden_state, x["input_ids"])
        else:
            # Previously an unknown mode fell through and fed the raw model
            # output object into the linear layer; fail loudly instead.
            raise ValueError(f"unknown MODE: {self.mode!r}")
        logits = self.linear(features)
        # torch.sigmoid replaces the deprecated F.sigmoid alias.
        return torch.sigmoid(logits)


class Tail(nn.Module):
    """MLM decoding tail: a 2-layer Transformer encoder followed by BERT's MLM head."""

    def __init__(self):
        super().__init__()
        self.bertconf = BertConfig.from_pretrained("bert-base-uncased")
        # batch_first=True: callers feed BERT hidden states shaped
        # (batch, seq, hidden). The previous default (batch_first=False)
        # treated dim 0 as the sequence, so self-attention mixed samples
        # across the batch instead of attending over tokens.
        encoder_layer = nn.TransformerEncoderLayer(d_model=768, nhead=12, batch_first=True)
        self.encoder = nn.TransformerEncoder(encoder_layer, num_layers=2)
        # Reuses BERT's own MLM classification head to project back to vocab.
        self.cls = BertOnlyMLMHead(self.bertconf)

    def forward(self, x):
        """Encode x (batch, seq, 768) and return MLM prediction scores."""
        out = self.encoder(src=x)
        out = self.cls(out)
        return out


class OursBin(nn.Module):
    """Sequence classifier (BERT) with an auxiliary MLM 'tail' loss during training."""

    def __init__(self, conf):
        super().__init__()
        self.conf = conf
        self.bert = BertForSequenceClassification.from_pretrained("bert-base-uncased")
        self.tail = Tail()
        self.loss_fct = CrossEntropyLoss()

    def forward(self, inputs, type="train"):
        """Run one forward mode.

        Args:
            inputs: "train": [cls_batch, cls_labels, mlm_batch, mlm_labels];
                    "dev": [batch, labels]; "inf"/"checktarget": [batch].
            type: one of "train", "dev", "inf", "checktarget".

        Returns:
            The BERT output; in "train" mode it additionally carries "tail_loss".

        Raises:
            ValueError: on an unknown `type` (previously this crashed with
            UnboundLocalError at the return statement).
        """
        if type == "train":
            bert_output = self.bert(**inputs[0], labels=inputs[1])
            # Second pass on the MLM batch, exposing hidden states for the tail.
            tailbert = self.bert(output_hidden_states=True, **inputs[2])
            tail_output = self.tail(x=tailbert["hidden_states"][-1])
            tail_loss = self.loss_fct(tail_output.view(-1, tail_output.shape[-1]), inputs[3].view(-1))
            bert_output["tail_loss"] = tail_loss
        elif type == "dev":
            bert_output = self.bert(**inputs[0], labels=inputs[1])
        elif type in ("inf", "checktarget"):
            # Both inference modes run a plain, label-free forward pass.
            bert_output = self.bert(**inputs[0])
        else:
            raise ValueError(f"unknown forward type: {type!r}")
        return bert_output


class Ours(nn.Module):
    """BERT masked-LM followed by a two-layer Transformer-encoder decoding tail."""

    def __init__(self, conf):
        super().__init__()
        self.conf = conf
        self.bert = BertForMaskedLM.from_pretrained("bert-base-uncased")
        self.tail = Tail()
        self.loss_fct = CrossEntropyLoss()

    def forward(self, inputs, type="train"):
        # Any non-training mode is a plain, label-free forward pass.
        if type != "train":
            return self.bert(**inputs[0])
        # Training: main MLM pass on the primary batch ...
        main_output = self.bert(labels=inputs[1], **inputs[0])
        # ... plus a second pass whose last hidden state feeds the tail.
        hidden = self.bert(output_hidden_states=True, labels=inputs[3], **inputs[2])
        tail_scores = self.tail(x=hidden["hidden_states"][-1])
        main_output["tail_loss"] = self.loss_fct(
            tail_scores.view(-1, tail_scores.shape[-1]), inputs[3].view(-1)
        )
        return main_output
