import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import BertModel


class SimCSE(nn.Module):
    """SimCSE-style sentence encoder built on Chinese RoBERTa-wwm-ext.

    `forward` maps token ids to 128-d sentence embeddings; the remaining
    methods compute contrastive (SimCSE) and pair-classification losses
    over a batch of such embeddings.
    """

    def __init__(self, config):
        """
        Args:
            config: project config object; must provide `Bert_path`
                (HF cache dir), `pooling` ('last-avg' or 'first-last-avg')
                and `device`.
        """
        super().__init__()
        self.config = config
        self.bert = BertModel.from_pretrained("hfl/chinese-roberta-wwm-ext", cache_dir=config.Bert_path)
        # Fine-tune the whole encoder. Pretrained parameters already default
        # to requires_grad=True; this loop only makes the intent explicit.
        for name, param in self.bert.named_parameters():
            param.requires_grad = True
        self.fc = nn.Linear(768 * 2, 128)  # projection head -> 128-d embedding
        self.sen = nn.Linear(128 * 3, 2)   # pair classifier over [u, v, |u - v|]
        self.pooling = config.pooling

    def forward(self, input_ids):
        """Encode `input_ids` ([batch, seq_len] LongTensor) into [batch, 128].

        No attention_mask is passed, so padding tokens (if any) contribute
        to the averages — TODO confirm callers feed equal-length batches.

        Raises:
            ValueError: if `self.pooling` is not a recognized strategy.
        """
        out = self.bert(input_ids, output_hidden_states=True)
        if self.pooling == 'last-avg':
            # Mean-pool the last hidden layer over the sequence dimension.
            last = out.last_hidden_state.transpose(1, 2)  # [batch, 768, seq_len]
            avg = torch.avg_pool1d(last, kernel_size=last.shape[-1]).squeeze(-1)  # [batch, 768]
            # NOTE(review): `avg` is 768-d but self.fc expects 768*2 inputs,
            # so this branch fails with a shape mismatch at runtime — confirm
            # whether a dedicated 768->128 projection was intended.
            return self.fc(avg)
        if self.pooling == 'first-last-avg':
            # Mean-pool the first and last encoder layers, then project.
            first = torch.mean(out.hidden_states[1], dim=1)  # [batch, 768]
            last = torch.mean(out.hidden_states[-1], dim=1)  # [batch, 768]
            output = self.fc(torch.cat([first, last], dim=-1))  # [batch, 128]
            return output
        # Previously an unknown pooling mode silently returned None; fail fast.
        raise ValueError(f"unknown pooling strategy: {self.pooling!r}")

    def simcse_sup_loss(self, y_pred):
        """Supervised SimCSE loss without hard negatives.

        Args:
            y_pred: [batch_size * 2, dim] embeddings laid out as adjacent
                positive pairs, i.e. rows (0, 1), (2, 3), ... belong together.

        Returns:
            Scalar cross-entropy loss over in-batch cosine similarities.
        """
        # Target of row i is its pair partner: [1, 0, 3, 2, ...].
        y_true = torch.arange(y_pred.shape[0], device=self.config.device)
        y_true = (y_true - y_true % 2 * 2) + 1
        # Pairwise cosine-similarity matrix over the whole batch.
        sim = F.cosine_similarity(y_pred.unsqueeze(1), y_pred.unsqueeze(0), dim=-1)
        # Push the diagonal to a huge negative value so a row never picks itself.
        sim = sim - torch.eye(y_pred.shape[0], device=self.config.device) * 1e12
        # Temperature scaling (tau = 0.05).
        sim = sim / 0.05
        loss = F.cross_entropy(sim, y_true)
        return loss

    def simcse_sup_loss_neg(self, y_pred):
        """Supervised SimCSE loss with hard negatives.

        Args:
            y_pred: [batch_size * 3, dim] embeddings laid out as triples
                (anchor, positive, hard negative); every third row is a
                negative and has no target of its own.

        Returns:
            Scalar cross-entropy loss over in-batch cosine similarities.
        """
        # Rows with (i + 1) % 3 == 0 are negatives and get no label; the
        # remaining rows point at their partner: labels [1, 0, 4, 3, ...].
        y_true = torch.arange(y_pred.shape[0], device=self.config.device)
        use_row = torch.where((y_true + 1) % 3 != 0)[0]
        y_true = (use_row - use_row % 3 * 2) + 1
        sim = F.cosine_similarity(y_pred.unsqueeze(1), y_pred.unsqueeze(0), dim=-1)
        # Push the diagonal to a huge negative value so a row never picks itself.
        sim = sim - torch.eye(y_pred.shape[0], device=self.config.device) * 1e12
        # Keep only the rows that actually have a target.
        sim = torch.index_select(sim, 0, use_row)
        # Temperature scaling (tau = 0.05).
        sim = sim / 0.05
        loss = F.cross_entropy(sim, y_true)
        return loss

    def sen_loss(self, y_pred):
        """Sentence-pair classification loss (pairs-only layout).

        Builds SBERT-style [u, v, |u - v|] features for positive pairs
        (adjacent rows) and synthetic negatives (row i paired with a shifted
        row), then classifies them with `self.sen`.

        Args:
            y_pred: [batch_size * 2, 128] embeddings, adjacent rows paired.

        Returns:
            (classification loss, squared mean norm-difference within pairs,
             scalar derived from the spread of the per-pair |u - v|)
        """
        batch_len = y_pred.size(0) // 2
        # First half of the features are negatives (label 0), second half
        # positives (label 1) — must match torch.cat([neg, pos]) below.
        label = torch.tensor([0] * batch_len + [1] * batch_len, device=self.config.device)
        pos = torch.cat(
            [torch.cat([y_pred[i], y_pred[i + 1], torch.abs(y_pred[i] - y_pred[i + 1])], dim=-1).unsqueeze(0) for i in
             range(0, batch_len * 2 - 1, 2)], dim=0)
        # NOTE(review): the concatenated partner uses offset (i + 6) while the
        # |u - v| term uses (i + 2); one of the two offsets looks like a typo —
        # confirm the intended negative-sampling shift before changing it.
        neg = torch.cat(
            [torch.cat(
                [y_pred[i], y_pred[(i + 6) % y_pred.size(0)], torch.abs(y_pred[i] - y_pred[(i + 2) % y_pred.size(0)])],
                dim=-1).unsqueeze(0) for i in range(0, batch_len * 2 - 1, 2)], dim=0)
        # Spread of the per-pair |u - v| spreads.
        neg_ = torch.std(
            torch.cat(
                [torch.std(torch.abs(y_pred[i] - y_pred[i + 1])).unsqueeze(0) for i in range(0, batch_len * 2 - 1, 2)]))
        # NOTE(review): divides by |neg_|, which can be 0 (identical pairs) —
        # confirm whether an epsilon is needed.
        neg_std = (1 / torch.abs(neg_).item() - 1) / 100
        # Penalise differing embedding norms within a pair.
        meanlen = torch.mean(
            torch.tensor([torch.norm(y_pred[i]) - torch.norm(y_pred[i + 1]) for i in range(0, batch_len * 2 - 1, 2)],
                         device=self.config.device))
        lenloss = torch.pow(meanlen, 2)
        inputs = torch.cat([neg, pos], dim=0)
        assert label.size(0) == inputs.size(0), 'input shape unequal label shape'
        output = self.sen(inputs)
        loss = F.cross_entropy(output, label)
        return loss, lenloss, neg_std

    def sen_loss_neg(self, y_pred):
        """Sentence-pair classification loss (triple layout with negatives).

        Positive features pair rows (3k, 3k + 1); negative features pair
        rows (3k + 1, 3k + 2).

        Args:
            y_pred: [batch_size * 3, 128] embeddings laid out as triples
                (anchor, positive, hard negative).

        Returns:
            (classification loss, squared mean norm-difference of positive
             pairs, scalar derived from the mean spread of negative |u - v|)
        """
        batch_len = y_pred.size(0) // 3
        # First half negatives (label 0), second half positives (label 1).
        label = torch.tensor([0] * batch_len + [1] * batch_len, device=self.config.device)
        pos = torch.cat(
            [torch.cat([y_pred[i], y_pred[i + 1], torch.abs(y_pred[i] - y_pred[i + 1])], dim=-1).unsqueeze(0) for i in
             range(0, batch_len * 3 - 2, 3)], dim=0)
        neg = torch.cat(
            [torch.cat([y_pred[i], y_pred[i + 1], torch.abs(y_pred[i] - y_pred[i + 1])], dim=-1).unsqueeze(0) for i in
             range(1, batch_len * 3 - 1, 3)], dim=0)
        # Mean spread of |u - v| over the negative pairs.
        neg_ = torch.mean(
            torch.cat(
                [torch.std(torch.abs(y_pred[i] - y_pred[i + 1])).unsqueeze(0) for i in range(1, batch_len * 3 - 1, 3)]))
        # NOTE(review): divides by |neg_|, which can be 0 — confirm whether an
        # epsilon is needed.
        neg_std = (1 / torch.abs(neg_).item() - 1) / 100
        # Penalise differing embedding norms within a positive pair.
        meanlen = torch.mean(
            torch.tensor([torch.norm(y_pred[i]) - torch.norm(y_pred[i + 1]) for i in range(0, batch_len * 3 - 2, 3)],
                         device=self.config.device))
        lenloss = torch.pow(meanlen, 2)
        inputs = torch.cat([neg, pos], dim=0)
        assert label.size(0) == inputs.size(0), 'input shape unequal label shape'
        output = self.sen(inputs)
        loss = F.cross_entropy(output, label)
        return loss, lenloss, neg_std

    def multi_loss(self, y_pred):
        """Combine the pair-layout losses for joint training.

        Note: uses the pairs-only variants, so `y_pred` must be
        [batch_size * 2, 128].
        """
        cos_loss = self.simcse_sup_loss(y_pred)
        sen_loss, lenloss, neg_std = self.sen_loss(y_pred)
        return lenloss, cos_loss, sen_loss, neg_std


if __name__ == '__main__':
    from config import Config
    from transformers import BertTokenizer

    # Smoke test: build the model and list every trainable tensor.
    cfg = Config()
    # tokenizer = BertTokenizer.from_pretrained(cfg.Bert_path)
    net = SimCSE(cfg)
    for param_name, tensor in net.named_parameters():
        print(param_name, tensor.size())
    # print(net(torch.tensor([tokenizer.encode_plus('今天天气不错')['input_ids']])).size())
