import torch
import torch.nn as nn


class Information_Entropy_Loss(nn.Module):
    """Mean (over the batch) of the summed Shannon entropy of attention rows.

    Each slice along the last dimension is treated as a probability
    distribution; its entropy is computed, entropies are summed per sample,
    and the result is averaged across the batch.
    """

    def __init__(self):
        super().__init__()

    def forward(self, atte):
        # Multi-head input (b, h, q, k): collapse the head dimension first.
        if atte.dim() == 4:
            atte = atte.mean(dim=1)
        # Row-wise entropy; the 1e-8 offset guards against log(0).
        row_entropy = -(atte * torch.log(atte + 1e-8)).sum(dim=-1)
        # Total entropy per sample, then the batch mean.
        per_sample = row_entropy.sum(dim=-1)
        return per_sample.mean()

class Variation_Loss(nn.Module):
    """Negative total (unbiased) variance of attention rows, batch-averaged.

    Returns minus the per-sample sum of row variances along the last
    dimension, averaged over the batch.
    """

    def __init__(self):
        super().__init__()

    def forward(self, atte):
        # Collapse the head dimension for (b, h, q, k) inputs.
        if atte.dim() == 4:
            atte = atte.mean(dim=1)
        # Unbiased variance of each row along the last axis, negated.
        neg_var = -atte.var(dim=-1)
        # Accumulate per sample, then average across the batch.
        return neg_var.sum(dim=-1).mean()


class BPRLoss(nn.Module):
    """Bayesian Personalized Ranking loss over Gaussian JS divergences.

    Each query embedding is treated as the mean of a Gaussian with identity
    covariance. The loss rewards positive queries being closer (in JS
    divergence) to the target embedding than negative ones, plus an L2
    penalty on the supplied model parameters.
    """

    def __init__(self, lambda_reg=0.01):
        super(BPRLoss, self).__init__()
        # Weight of the L2 regularization term.
        self.lambda_reg = lambda_reg
        # NOTE(review): not used in forward; kept so existing code touching
        # this attribute keeps working.
        self.softmax = nn.Softmax(dim=-1)

    def kl_divergence(self, mean1, cov1, mean2, cov2):
        """Batched KL(N(mean1, cov1) || N(mean2, cov2)).

        mean*: (B, D), cov*: (B, D, D). Returns shape (B,).
        """
        B, D = mean1.shape
        cov2_inv = torch.inverse(cov2)
        # tr(cov2^-1 @ cov1) per batch element, without a Python loop or
        # materializing the full matrix product.
        trace_terms = torch.einsum('bij,bji->b', cov2_inv, cov1)
        mean_diff = mean2 - mean1
        # (mu2 - mu1)^T cov2^-1 (mu2 - mu1); einsum keeps the batch dim even
        # when B == 1 (the previous .squeeze() collapsed it to a 0-d tensor).
        mean_terms = torch.einsum('bi,bij,bj->b', mean_diff, cov2_inv, mean_diff)
        log_det_terms = torch.logdet(cov2) - torch.logdet(cov1)
        return 0.5 * (trace_terms + mean_terms - D + log_det_terms)

    def js_divergence(self, mean1, cov1, mean2, cov2):
        """Jensen-Shannon divergence between two batched Gaussians, shape (B,)."""
        mean_m = 0.5 * (mean1 + mean2)
        cov_m = 0.5 * (cov1 + cov2)
        kl1 = self.kl_divergence(mean1, cov1, mean_m, cov_m)
        kl2 = self.kl_divergence(mean2, cov2, mean_m, cov_m)
        return 0.5 * (kl1 + kl2)

    def forward(self, pos_us, neg_us, target_embed, model_parameters):
        """Compute the BPR loss.

        pos_us / neg_us: (b, q, d) positive/negative query means.
        target_embed: (b, d) or (b, 1, d) target mean.
        model_parameters: iterable of tensors for the L2 regularizer.
        """
        b, q, d = pos_us.shape
        if len(target_embed.shape) == 3:
            target_embed = target_embed.squeeze(1)
        # Identity covariance shared by every distribution.
        cov1 = torch.eye(d, dtype=pos_us.dtype, device=pos_us.device).unsqueeze(0).repeat(b, 1, 1)
        cov2 = torch.eye(d, dtype=neg_us.dtype, device=pos_us.device).unsqueeze(0).repeat(b, 1, 1)

        pos_js = []
        neg_js = []
        for i in range(q):
            pos_js.append(self.js_divergence(pos_us[:, i, :], cov1, target_embed, cov2).unsqueeze(1))
            neg_js.append(self.js_divergence(neg_us[:, i, :], cov1, target_embed, cov2).unsqueeze(1))

        diff_scores = torch.concat(neg_js, dim=1).mean(dim=1) - torch.concat(pos_js, dim=1).mean(dim=1)
        # BUG FIX: log(sigmoid(x)) underflows to -inf for large negative x;
        # logsigmoid is the numerically stable equivalent.
        log_prob = nn.functional.logsigmoid(diff_scores)
        loss = -torch.mean(log_prob)

        # L2 regularization over the supplied parameters.
        reg_loss = 0
        for param in model_parameters:
            reg_loss = reg_loss + torch.norm(param) ** 2
        loss = loss + self.lambda_reg * reg_loss

        return loss


class Interest_Difference_Loss(nn.Module):
    """Negative summed pairwise JS divergence between the last q_num attention rows.

    Minimizing this loss pushes the query attention distributions apart
    from one another.
    """

    def __init__(self, q_num, short_time):
        super().__init__()
        # Number of trailing rows treated as interest queries.
        self.q_num = q_num
        # NOTE(review): stored but unused in forward; presumably consumed
        # elsewhere — confirm against callers.
        self.short_time = short_time

    def forward(self, atte):
        batch_size = atte.shape[0]
        # Collapse the head dimension for (b, h, q, k) inputs.
        if atte.dim() == 4:
            atte = atte.mean(dim=1)
        # The interest-query rows are the last q_num positions.
        q_atte = atte[:, -self.q_num:, :]

        eps = 1e-8  # guards log(0) and division by zero
        total = torch.zeros((batch_size,), dtype=torch.float32, device=atte.device)
        for i in range(self.q_num):
            for j in range(i + 1, self.q_num):
                p = q_atte[:, i, :]
                r = q_atte[:, j, :]
                m = 0.5 * (p + r)
                # JS(p, r) = 0.5 * KL(p || m) + 0.5 * KL(r || m)
                kl_pm = (p * torch.log((p + eps) / (m + eps))).sum(dim=1)
                kl_rm = (r * torch.log((r + eps) / (m + eps))).sum(dim=1)
                total = total + 0.5 * (kl_pm + kl_rm)
        # Negated so that maximizing divergence minimizes the loss.
        return -total.mean()


if __name__ == '__main__':
    # Smoke test: build a random softmax-normalized attention map and
    # evaluate the entropy and interest-difference losses on it.
    q_num = 4
    short_q_num = 4
    early_q_num = 4
    sequence_len = 100

    total_len = q_num + short_q_num + early_q_num + sequence_len
    atte = nn.Softmax(dim=-1)(torch.randn((10, total_len, total_len), requires_grad=True))
    infor_ent = Information_Entropy_Loss()(atte)
    # BUG FIX: Interest_Difference_Loss.__init__ takes (q_num, short_time);
    # the old call passed five arguments and raised TypeError at runtime.
    idl = Interest_Difference_Loss(q_num, 20)(atte)
    print(infor_ent)
    print(idl)