import torch
from .losses import Loss
import math
import torch.nn.functional as F

def bt_loss(h1: torch.Tensor, h2: torch.Tensor, lambda_, batch_norm=True, eps=1e-15, *args, **kwargs):
    """Barlow Twins loss between two batches of embeddings.

    Args:
        h1, h2: (batch_size, feature_dim) embedding tensors from two views.
        lambda_: weight of the off-diagonal (redundancy-reduction) term;
            defaults to 1 / feature_dim when None.
        batch_norm: if True, standardize each feature over the batch before
            computing the cross-correlation matrix.
        eps: numerical stabilizer added to the per-feature std.

    Returns:
        Scalar tensor: invariance term + lambda_ * redundancy term.
    """
    batch_size = h1.size(0)
    feature_dim = h1.size(1)

    if lambda_ is None:
        lambda_ = 1. / feature_dim

    if batch_norm:
        z1_norm = (h1 - h1.mean(dim=0)) / (h1.std(dim=0) + eps)
        z2_norm = (h2 - h2.mean(dim=0)) / (h2.std(dim=0) + eps)
        c = (z1_norm.T @ z2_norm) / batch_size
    else:
        c = h1.T @ h2 / batch_size

    # Build the mask directly as bool on the same device as c: a plain
    # torch.eye(...) lives on CPU and would raise a device-mismatch error
    # when h1/h2 are on GPU.
    off_diagonal_mask = ~torch.eye(feature_dim, dtype=torch.bool, device=c.device)
    loss = (1 - c.diagonal()).pow(2).sum()
    loss += lambda_ * c[off_diagonal_mask].pow(2).sum()
    return loss

def RFF(x, factor, seed):
    """Random Fourier features: `factor` cosine projections of x, concatenated
    along dim 1 (output has x.size(1) * factor columns).

    NOTE: `seed` is currently unused (the seeding line is commented out), so
    the draws come from torch's global RNG state.
    """
    #torch.manual_seed(seed)
    features = []
    for _ in range(factor):
        # w ~ N(0, 1)
        freq = torch.randn(1)
        # phi ~ Uniform[0, 2*pi)
        shift = torch.rand(1) * 2 * math.pi
        #phi = 1.618*f
        features.append(torch.cos(freq * x + shift) * math.sqrt(2))
    return torch.cat(features, 1)


def RFF_fixed(x, ws, phis):
    """Cosine feature map with fixed frequencies and phases.

    For each pair (w, phi) taken elementwise from `ws`/`phis`, computes
    sqrt(2) * cos(w * x + phi) and concatenates the results along dim 1,
    giving x.size(1) * ws.size(0) output columns.
    """
    blocks = [torch.cos(w * x + phi) * math.sqrt(2) for w, phi in zip(ws, phis)]
    return torch.cat(blocks, 1)

def byol_loss_fn(x, y):
    """BYOL regression loss: 2 - 2 * cosine similarity, row-wise.

    Returns a per-row tensor in [0, 4]; 0 when rows point the same way,
    4 when they are opposite.
    """
    x_unit = F.normalize(x, dim=-1, p=2)
    y_unit = F.normalize(y, dim=-1, p=2)
    cosine = (x_unit * y_unit).sum(dim=-1)
    return 2 - 2 * cosine

def hsic_loss(h1: torch.Tensor, h2: torch.Tensor, lambda_, batch_norm=False, eps=1e-15, *args, **kwargs):
    """HSIC-style cross-correlation loss between two batches of embeddings.

    Args:
        h1, h2: (batch_size, feature_dim) embedding tensors.
        lambda_: weight of the off-diagonal term; defaults to 1 / feature_dim
            when None.
        batch_norm: if True, standardize each feature over the batch first.
        eps: numerical stabilizer added to the per-feature std.

    Returns:
        (loss, on_diag_loss) tuple of scalar tensors. The on-diagonal term is
        deliberately zeroed (* 0.0) — only the off-diagonal penalty
        lambda_ * sum((1 + c_ij)^2) contributes to the loss.
    """
    batch_size = h1.size(0)
    feature_dim = h1.size(1)

    if lambda_ is None:
        lambda_ = 1. / feature_dim

    #h1 = h1 - h1.mean(dim=0)
    #h2 = h2 - h2.mean(dim=0)

    if batch_norm:
        z1_norm = (h1 - h1.mean(dim=0)) / (h1.std(dim=0) + eps)
        z2_norm = (h2 - h2.mean(dim=0)) / (h2.std(dim=0) + eps)
        c = (z1_norm.T @ z2_norm) / batch_size
    else:
        c = h1.T @ h2 / batch_size

    # Build the mask directly as bool on the same device as c: a plain
    # torch.eye(...) lives on CPU and would raise a device-mismatch error
    # when h1/h2 are on GPU (same fix as in bt_loss).
    off_diagonal_mask = ~torch.eye(feature_dim, dtype=torch.bool, device=c.device)
    on_diag_loss = (1 - c.diagonal()).pow(2).sum() * 0.0
    off_diag_loss = lambda_ * (1 + c[off_diagonal_mask]).pow(2).sum()
    return on_diag_loss + off_diag_loss, on_diag_loss


class HSIC(Loss):
    """HSIC-style contrastive loss computed on random-Fourier-feature maps.

    `compute` combines a BYOL cosine term on the raw embeddings with the
    off-diagonal penalty from `hsic_loss` applied to fixed random Fourier
    features (`RFF_fixed`) of the anchor and sample views.

    NOTE(review): __init__ does not call super().__init__() — confirm the
    `Loss` base class needs no initialization of its own.
    """
    def __init__(self, lambda_: float = None, batch_norm: bool = False, eps: float = 1e-15,
                 ws: torch.Tensor = None, phis: torch.Tensor = None):
        # Off-diagonal weight forwarded to hsic_loss (None -> 1/feature_dim there).
        self.lambda_ = lambda_
        # Whether hsic_loss standardizes features over the batch.
        self.batch_norm = batch_norm
        # Numerical stabilizer forwarded to hsic_loss.
        self.eps = eps
        self.ws = ws  # 2 * factor, 0 for anchor, 1 for sample
        self.phis = phis # 2 * factor, 0 for anchor, 1 for sample

    def compute(self, anchor, sample, pos_mask, neg_mask, *args, **kwargs) -> torch.FloatTensor:
        """Return the combined loss for a batch of anchor/sample embeddings.

        pos_mask and neg_mask are accepted for interface compatibility but
        are not used by this loss.

        NOTE(review): despite the torch.FloatTensor annotation, this returns
        a (loss, byol_term) tuple — callers apparently unpack both values.
        """
        #anchor = RFF(anchor, self.factor, 100)
        #sample = RFF(sample, self.factor, 200)

        # BYOL term on the raw embeddings, before the RFF mapping.
        byolloss = byol_loss_fn(anchor, sample)
        #anchor = F.normalize(anchor)
        #sample = F.normalize(sample)
        # Map each view with its own fixed frequencies/phases (row 0 / row 1).
        anchor = RFF_fixed(anchor, self.ws[0,:], self.phis[0,:])
        sample = RFF_fixed(sample, self.ws[1,:], self.phis[1,:])

        loss, onloss = hsic_loss(anchor, sample, self.lambda_, self.batch_norm, self.eps)

        loss += byolloss.sum()

        # Average over the number of random features.
        factor = self.ws.size(1)
        loss /= factor
        # NOTE(review): onloss is rescaled here but never returned or used.
        onloss /= factor
        return loss, byolloss.sum()
