import einops
import torch
import torch.nn as nn
import torch.nn.functional as F
from pretrain_models.utils import patchify

def forward_constrast_loss(x_i, x_j, temp=0.5):
    """NT-Xent (SimCLR-style) contrastive loss over two views of one batch.

    Args:
        x_i: (b, hidden) embeddings of the first augmented view.
        x_j: (b, hidden) embeddings of the second augmented view.
        temp: softmax temperature for the similarity logits.

    Returns:
        Scalar loss averaged over all 2*b anchors.
    """
    dev = x_i.device
    n = x_i.shape[0]
    tau = torch.tensor(temp).to(dev)

    # L2-normalize both views and stack: rows 0..n-1 are view i, rows n..2n-1 view j.
    reps = torch.cat([F.normalize(x_i, dim=1), F.normalize(x_j, dim=1)], dim=0)

    # Full (2n, 2n) pairwise cosine-similarity matrix between all stacked rows.
    sim = F.cosine_similarity(reps.unsqueeze(1),
                              reps.unsqueeze(0),
                              dim=2)

    # Positive pairs live on the +-n off-diagonals (sample k of one view vs the other).
    positives = torch.cat([torch.diag(sim, n), torch.diag(sim, -n)], dim=0)

    # Mask that zeroes only the main diagonal (self-similarity); everything else
    # stays in the denominator, as in the standard SimCLR formulation.
    off_diag = (~torch.eye(n * 2, n * 2, dtype=bool).to(dev)).float()

    numerator = torch.exp(positives / tau)
    denominator = (off_diag * torch.exp(sim / tau)).sum(dim=1)
    return (-torch.log(numerator / denominator)).sum() / (2 * n)

def forward_loss_rotate(pred_rotate, label_rotate):
    """Cross-entropy loss for the 4-way rotation prediction pretext task.

    Args:
        pred_rotate: (b, 4) logits, one score per rotation direction.
        label_rotate: (b,) integer rotation class indices.
    """
    return F.cross_entropy(pred_rotate, label_rotate)

def forward_loss_reconstruct(pred, labels):
    """Plain mean-squared-error reconstruction loss over all elements."""
    return F.mse_loss(pred, labels)

def forward_loss_reconstruct_mask(pred, labels, mask):
    """MSE reconstruction loss averaged only over masked patches (MAE-style).

    Args:
        pred: predictions; last dim is the per-patch feature dimension.
        labels: targets, same shape as pred.
        mask: binary mask matching pred's leading dims, 1 = masked patch
              (counts toward the loss), 0 = visible patch (ignored).
    """
    per_patch = ((pred - labels) ** 2).mean(dim=-1)  # [N, L] mean error within each patch
    return (per_patch * mask).sum() / mask.sum()     # average over masked patches only

def forward_loss_similarity(pred_1, pred_2):
    """In-batch contrastive alignment loss between two encodings of the same batch.

    pred_1 and pred_2 are (b, hidden_size) embeddings from a first and second
    encoding pass over the same images; row n of pred_1 should match row n of
    pred_2 and no other row. Cosine similarities are used directly as
    cross-entropy logits (no temperature scaling).
    """
    sim_matrix = nn.CosineSimilarity(dim=-1)(pred_1.unsqueeze(dim=1),
                                             pred_2.unsqueeze(dim=0))
    targets = torch.arange(sim_matrix.shape[0], dtype=torch.long).to(pred_1.device)
    return nn.CrossEntropyLoss()(sim_matrix, targets)

def forward_loss_mask_region(pred_bottom_feature, mask_labels):
    """Cross-entropy over per-position class logits for masked-region prediction.

    Args:
        pred_bottom_feature: (..., C) logits; flattened to (N, C) before the loss.
        mask_labels: per-position class targets; flattened to (N,).

    NOTE(review): unlike forward_loss_mask, this does not cast mask_labels to
    long — callers are presumably passing integer targets already; confirm.
    """
    num_classes = pred_bottom_feature.shape[-1]
    flat_logits = pred_bottom_feature.reshape(-1, num_classes)
    flat_targets = mask_labels.reshape(-1)
    return nn.CrossEntropyLoss()(flat_logits, flat_targets)

def forward_loss_mask(pred_bottom_feature, mask_labels):
    """Cross-entropy for mask prediction; targets are cast to long first.

    Args:
        pred_bottom_feature: (..., C) logits; flattened to (N, C).
        mask_labels: targets of any integer-valued dtype; cast to long and
            flattened to (N,).
    """
    flat_logits = pred_bottom_feature.reshape(-1, pred_bottom_feature.shape[-1])
    flat_targets = mask_labels.long().reshape(-1)
    return F.cross_entropy(flat_logits, flat_targets)

def forward_loss_mask_region_patch(pred_bottom_feature_patch, mask_labels_patch):
    """Patch-level masked-region prediction loss: cross-entropy over flattened logits.

    Args:
        pred_bottom_feature_patch: (..., C) per-patch class logits.
        mask_labels_patch: per-patch class targets, flattened to match.
    """
    num_classes = pred_bottom_feature_patch.shape[-1]
    return F.cross_entropy(pred_bottom_feature_patch.reshape(-1, num_classes),
                           mask_labels_patch.reshape(-1))

def forward_loss_mask_position(pred_bottom_feature, mask_labels):
    """Cross-entropy for masked-position prediction.

    Args:
        pred_bottom_feature: (..., C) position logits; viewed as (N, C).
        mask_labels: position targets; cast to long and viewed as (N,).
    """
    # .view (not .reshape) is kept from the original: inputs are expected to be
    # contiguous here.
    targets = mask_labels.long().view(-1)
    logits = pred_bottom_feature.view(-1, pred_bottom_feature.shape[-1])
    return nn.CrossEntropyLoss()(logits, targets)

def forward_loss_mask_region_multi_label(pred_bottom_feature, mask_labels):
    """Multi-label BCE-with-logits loss over spatial positions.

    Args:
        pred_bottom_feature: (b, c, d, w, h) per-channel logits; rearranged to
            (b, d*w*h, c) so channels come last, matching mask_labels.
        mask_labels: (b, d*w*h, c) float multi-hot targets.
    """
    b, c = pred_bottom_feature.shape[:2]
    # Equivalent of einops "b c d w h -> b (d w h) c": flatten spatial dims,
    # then move the channel axis last.
    channels_last = pred_bottom_feature.reshape(b, c, -1).permute(0, 2, 1)
    return F.binary_cross_entropy_with_logits(channels_last, mask_labels)
