import torch
import torch.nn as nn
from transformers import RobertaConfig, RobertaForMaskedLM, BertForPreTraining
import torch.nn.functional as F
import random
import numpy as np
# Seed the stdlib RNG once at import time so the random negative sampling in
# ModelForMLM.forward is reproducible across runs.
# NOTE(review): this is an import-time side effect that also affects any other
# module using `random` in this process — confirm that is intended.
random.seed(2021)
class ModelForMLM(nn.Module):
    """RoBERTa MLM backbone with an additional sentence-level head.

    Combines the masked-language-modeling loss of a pretrained
    ``RobertaForMaskedLM`` with either:

    * a next-sentence-prediction (NSP) loss over a binary classifier on the
      pooled [CLS] embedding, when ``isnext`` labels are supplied, or
    * a triplet margin loss over [CLS] embeddings (even-indexed rows are
      anchors, odd-indexed rows their positives, negatives drawn at random
      from the other anchors), when ``isnext`` is ``None``.
    """

    def __init__(self, pretrained_path, config):
        """Load the pretrained backbone and build the pooler/NSP head.

        Args:
            pretrained_path: model name or local path for ``from_pretrained``.
            config: a config object exposing ``hidden_size``.
        """
        super().__init__()
        self.Roberta_for_mlm = RobertaForMaskedLM.from_pretrained(pretrained_path)
        # BERT-style pooler applied to the [CLS] hidden state.
        self.pooler = nn.Sequential(
            nn.Linear(config.hidden_size, config.hidden_size),
            nn.LayerNorm(config.hidden_size, eps=1e-05),
            nn.Tanh(),
        )
        # Binary classifier for the is-next-sentence decision.
        self.cls = nn.Linear(config.hidden_size, 2)

    def resize_token_embeddings(self, len):
        """Resize the backbone's token-embedding matrix to ``len`` entries.

        NOTE(review): the parameter name shadows the ``len`` builtin; it is
        kept only for backward compatibility with existing callers.
        """
        self.Roberta_for_mlm.resize_token_embeddings(len)

    def _encode(self, input_ids, token_type_ids, attention_mask, labels):
        """Run the backbone once; return (mlm_output, pooled [CLS], NSP logits).

        BUGFIX: with ``return_dict=True`` the backbone returns a
        ``MaskedLMOutput`` object, not a ``(loss, hidden_states)`` pair —
        tuple-unpacking it yields the dict *keys*, so the original
        ``a, b = model(...)`` could never produce hidden states.  We now
        request hidden states explicitly and take the last layer.
        (Assumes a stock ``transformers`` backbone — confirm no patched
        model that returns a tuple is in use.)
        """
        outputs = self.Roberta_for_mlm(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=labels,
            output_hidden_states=True,
            return_dict=True,
        )
        last_sequence_output = outputs.hidden_states[-1]
        # The sentence-level decision is driven by the first token ([CLS]).
        pooled_output = self.pooler(last_sequence_output[:, 0])
        return outputs, pooled_output, self.cls(pooled_output)

    def forward(self, input_ids, token_type_ids, attention_mask, labels, isnext=None, return_dict=None):
        """Return the MLM loss plus the NSP loss (0 when ``isnext`` is None).

        When ``isnext`` is ``None`` a triplet loss over [CLS] embeddings is
        computed but — preserving the original training objective — it is
        deliberately NOT added to the returned total.
        TODO(review): confirm whether triplet_loss should be included.
        """
        masked_lm_out, pooled_output, seq_relationship_scores = self._encode(
            input_ids, token_type_ids, attention_mask, labels)
        triplet_loss, next_sentence_loss = 0, 0
        if isnext is not None:
            loss_fct = nn.CrossEntropyLoss()
            next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), isnext.view(-1))
        else:
            # Anchors and positives interleave along the batch dimension.
            anchors = pooled_output[0::2]
            positives = pooled_output[1::2]
            pos_dist = F.pairwise_distance(anchors, positives)
            n_pairs = anchors.size(0)
            # BUGFIX: with a single pair there is no other anchor to sample
            # a negative from (random.choice([]) raised IndexError).
            if n_pairs > 1:
                neg_indices = []
                for i in range(n_pairs):
                    candidates = [j for j in range(n_pairs) if j != i]
                    neg_indices.append(random.choice(candidates))
                negatives = anchors[torch.tensor(neg_indices)]
                # Negative distance: each anchor vs. a randomly chosen other anchor.
                neg_dist = F.pairwise_distance(anchors, negatives)
                # Margin-1 triplet loss, hinged at zero.
                triplet_loss = (1 + pos_dist - neg_dist).clamp(min=0).mean()
        return masked_lm_out.loss + next_sentence_loss

    def predict(self, input_ids, token_type_ids, attention_mask, labels, isnext=None, return_dict=None):
        """Return the NSP logits, shape ``[batch, 2]``, for the given batch.

        ``isnext`` and ``return_dict`` are accepted for signature
        compatibility with existing callers but are not used.
        """
        _, _, seq_relationship_scores = self._encode(
            input_ids, token_type_ids, attention_mask, labels)
        return seq_relationship_scores


