import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import AutoModel, AutoConfig, AutoTokenizer


class PromptBert(nn.Module):
    """Prompt-based sentence encoder on top of Chinese RoBERTa-wwm-ext.

    A sentence is encoded as the hidden state at its [MASK] position; the
    hidden state of the template alone (also at [MASK]) is subtracted to
    remove template bias, and the difference is projected to 128 dims.
    """

    def __init__(self, config):
        super(PromptBert, self).__init__()
        self.config = config
        backbone_conf = AutoConfig.from_pretrained(
            "hfl/chinese-roberta-wwm-ext", cache_dir=config.model_path)
        # Override both dropout rates with the experiment's setting.
        backbone_conf.attention_probs_dropout_prob = config.dropout_prob
        backbone_conf.hidden_dropout_prob = config.dropout_prob
        self.model = AutoModel.from_pretrained(
            "hfl/chinese-roberta-wwm-ext",
            cache_dir=config.model_path,
            config=backbone_conf)
        # Fine-tune the entire backbone.
        for _, weight in self.model.named_parameters():
            weight.requires_grad = True
        self.refect = nn.Linear(768, 128, bias=False)

    def _mask_embedding(self, token_ids):
        """Hidden state at the [MASK] position of each sequence, shape (B, 1, 768)."""
        hidden = self.model(token_ids)[0]
        # One-hot row selector over sequence positions where the id equals
        # the mask id (config.mask_ids); matmul picks out that hidden state.
        mask_pos = (token_ids == self.config.mask_ids).float().unsqueeze(1)
        return torch.matmul(mask_pos, hidden)

    def forward(self, inputs, template_inputs):
        """Embed prompted sentences; `template_inputs` is the template-only batch."""
        sentence_vec = self._mask_embedding(inputs)
        template_vec = self._mask_embedding(template_inputs)
        # Subtract the template's own contribution, then project to 128 dims.
        return self.refect((sentence_vec - template_vec).view(-1, 768))


def compute_loss(query, key, tao=0.05):
    """In-batch InfoNCE loss plus a shifted-negative ratio term.

    Args:
        query: (N, D) embeddings of one view of the batch.
        key:   (N, D) embeddings of the matching view; row i of ``key`` is
               the positive example for row i of ``query``.
        tao:   temperature for the InfoNCE term.

    Returns:
        Scalar tensor: InfoNCE loss + ``-log(pos / neg)`` where the negatives
        are the keys rotated by two positions (row i vs. row i+2).

    Raises:
        ValueError: if ``query`` and ``key`` have different batch sizes.
    """
    # Validate up front, with a real exception (assert is stripped under -O).
    if query.size(0) != key.size(0):
        raise ValueError('batch size error')
    # L2-normalize rows so every dot product below is a cosine similarity.
    query = torch.div(query, torch.norm(query, dim=1).reshape(-1, 1))
    key = torch.div(key, torch.norm(key, dim=1).reshape(-1, 1))
    n, d = query.shape

    # Positive similarity exp(<q_i, k_i> / tao), shape (N, 1).
    batch_pos = torch.exp(torch.div(
        torch.bmm(query.view(n, 1, d), key.view(n, d, 1)).view(n, 1), tao))
    # Denominator: sum over all keys per query. keepdim=True keeps it (N, 1)
    # so the division is elementwise instead of broadcasting to (N, N).
    batch_all = torch.sum(
        torch.exp(torch.div(torch.mm(query, torch.t(key)), tao)),
        dim=1, keepdim=True)
    info_nce = torch.mean(-torch.log(torch.div(batch_pos, batch_all)))

    # Negatives: keys rotated by two positions (row i paired with row i+2).
    shifted_key = torch.cat([key[2:], key[:2]], dim=0)
    sim = F.cosine_similarity(query, key, dim=-1)
    unsim = F.cosine_similarity(query, shifted_key, dim=-1)
    pos = torch.mean(torch.exp(sim))
    neg = torch.sum(torch.exp(unsim))
    # Bug fix: minimize -log(pos / neg) so that HIGHER positive similarity
    # LOWERS the loss. The previous code returned pos / neg directly, which
    # rewarded pushing positives apart (cf. the commented-out simloss above).
    ratio_loss = -torch.log(torch.div(pos, neg + 1e-5))
    return info_nce + ratio_loss


# def simloss(query, key):
#     query = torch.div(query, torch.norm(query, dim=1).reshape(-1, 1))
#     key = torch.div(key, torch.norm(key, dim=1).reshape(-1, 1))
#     neg = torch.cat([key[2:], key[:2]], dim=0)
#     assert query.size(0) == key.size(0), 'batch size error'
#     sim = F.cosine_similarity(query, key, dim=-1)
#     unsim = F.cosine_similarity(query, neg, dim=-1)
#     pos = torch.mean(torch.exp(sim))
#     neg = torch.sum(torch.exp(unsim))
#     loss = - torch.log(torch.div(pos, (neg + 1e-5)))
#     return loss

def infoloss(config, data):
    """Sum of four pairwise contrastive losses over two pairs of views.

    ``data`` unpacks to ``(tmp1, tmp2, sim1, sim2)``: two template-debiased
    views and two similar (augmented) views of the same batch. The loss
    couples each view with its counterpart and across the two pairs.
    """
    tmp1, tmp2, sim1, sim2 = data
    view_pairs = ((tmp1, tmp2), (sim1, sim2), (tmp1, sim1), (tmp2, sim2))
    return sum(simloss(config, left, right) for left, right in view_pairs)


def simloss(config, query, key):
    """SimCSE-style in-batch contrastive loss between two aligned views.

    Row i of ``query`` and row i of ``key`` are positives of each other;
    every other row in the stacked 2N batch acts as a negative.
    """
    query = torch.div(query, torch.norm(query, dim=1).reshape(-1, 1))
    key = torch.div(key, torch.norm(key, dim=1).reshape(-1, 1))
    assert query.size(0) == key.size(0), 'batch size error'
    reps = torch.cat([query, key], dim=0)
    # Pairwise cosine similarity over the stacked batch, shape (2N, 2N).
    sim_matrix = F.cosine_similarity(reps.unsqueeze(1), reps.unsqueeze(0), dim=-1)
    # Push the diagonal to a huge negative value so each row never picks
    # itself as its own positive.
    sim_matrix = sim_matrix - torch.eye(reps.shape[0], device=config.device) * 1e12
    batch = query.size(0)
    eye = torch.eye(batch, device=config.device)
    zeros = torch.zeros([batch, batch], device=config.device)
    # Positive-index targets: row i -> i+N for the first half, i+N -> i for
    # the second half (extracted from the block one-hot label matrix).
    label_matrix = torch.cat([torch.cat([zeros, eye], dim=-1),
                              torch.cat([eye, zeros], dim=-1)], dim=0)
    targets = torch.where(label_matrix == 1)[-1]
    # Temperature-scale, then cross-entropy against each row's positive index.
    logits = sim_matrix / 0.05
    return F.cross_entropy(logits, targets.long())


if __name__ == '__main__':
    from config import Config
    from transformers import BertTokenizer

    # Smoke test: encode a prompted sentence and its template counterpart,
    # then print the resulting 128-d embedding.
    config = Config()
    tokenizer = BertTokenizer.from_pretrained(config.model_path)
    model = PromptBert(config)
    sentence_ids = tokenizer.encode_plus('今天天气[MASK]不错', return_tensors='pt')['input_ids']
    template_ids = tokenizer.encode_plus('今[MASK]天天气不1错', return_tensors='pt')['input_ids']
    print(model(sentence_ids, template_ids))
