import torch
import logging
import sys
import time


def todevice(tensor, device):
    """Recursively move a tensor, or a list/tuple of tensors, to `device`.

    Args:
        tensor: a torch.Tensor, or a (possibly nested) list/tuple of tensors.
        device: any target accepted by Tensor.to() (e.g. 'cpu', 'cuda:0').

    Returns:
        The moved tensor, or a list of moved elements (tuples come back as
        lists, preserving the original behavior).

    Raises:
        TypeError: if `tensor` is neither a Tensor nor a list/tuple.
    """
    if isinstance(tensor, (list, tuple)):
        # Recurse per element; unlike the original's assert on tensor[0],
        # this also accepts empty and nested containers.
        return [todevice(t, device) for t in tensor]
    if isinstance(tensor, torch.Tensor):
        return tensor.to(device)
    # The original fell through here and silently returned None.
    raise TypeError("todevice expects a Tensor or a list/tuple of Tensors, "
                    "got {}".format(type(tensor).__name__))
    
def loss_dependence(emb1, emb2, dim):
    """HSIC-style dependence between two batched feature maps.

    Args:
        emb1, emb2: batched embeddings; assumes shape (batch, dim, d) so that
            each Gram matrix is (batch, dim, dim) -- TODO confirm with callers.
        dim: size of the first feature axis (must match the Gram matrix size).

    Returns:
        Scalar tensor: sum over the batch of trace(R K1 R K2), where R is the
        centering matrix I - (1/dim) * ones.
    """
    # Allocate the centering matrix on the inputs' device instead of the
    # original hard-coded .cuda(), which crashed on CPU-only machines.
    device = emb1.device
    R = torch.eye(dim, device=device) - (1.0 / dim) * torch.ones(dim, dim, device=device)
    # Per-sample Gram (self-similarity) matrices.
    K1 = torch.bmm(emb1, torch.transpose(emb1, 1, 2))
    K2 = torch.bmm(emb2, torch.transpose(emb2, 1, 2))
    RK1 = torch.bmm(R.expand_as(K1), K1)
    RK2 = torch.bmm(R.expand_as(K2), K2)
    ans = torch.bmm(RK1, RK2)
    # Sum of per-sample traces, vectorized instead of a Python loop.
    HSIC = torch.diagonal(ans, dim1=-2, dim2=-1).sum()
    return HSIC

def common_loss(emb1, emb2):
    """Mean squared difference between the normalized self-similarity
    (covariance) matrices of two batched embeddings.

    Each embedding is centered along dim 1, row-normalized along dim 2,
    and turned into a batched Gram matrix; the loss is the mean of the
    squared element-wise gap between the two Gram matrices.
    """
    def _normalized_cov(emb):
        # Center each sample, then scale rows to unit L2 norm.
        centered = emb - torch.mean(emb, dim=1, keepdim=True)
        unit = torch.nn.functional.normalize(centered, p=2, dim=2)
        return torch.bmm(unit, torch.transpose(unit, 1, 2))

    gap = _normalized_cov(emb1) - _normalized_cov(emb2)
    return torch.mean(gap ** 2)

def batch_accuracy(predicted, true):
    """Return the per-sample agreement mask between predictions and targets.

    NOTE: despite the name, this does not aggregate into an accuracy value;
    it returns a boolean tensor, one entry per sample.

    Args:
        predicted: score/logit tensor; class is taken as argmax over dim 1.
        true: target class indices.

    Returns:
        Boolean tensor where each entry is True iff argmax matches the target.
    """
    labels = predicted.detach().argmax(dim=1)
    return labels == true

def step_decay(opt, optimizer):
    """Halve the learning rate stored on `opt` and push it to the optimizer.

    Args:
        opt: options/namespace object carrying the current `lr` (mutated in place).
        optimizer: torch optimizer whose param groups receive the new rate.

    Returns:
        The same optimizer, with every param group's 'lr' set to the halved rate.
    """
    new_lr = opt.lr * 0.5
    opt.lr = new_lr
    logging.info("Reduced learning rate to {}".format(new_lr))
    sys.stdout.flush()
    for group in optimizer.param_groups:
        group['lr'] = new_lr
    return optimizer

def save_checkpoint(epoch, model, optimizer, model_kwargs, filename):
    """Serialize the training state to `filename` via torch.save.

    Args:
        epoch: current epoch number (stored as-is).
        model: module whose state_dict() is saved.
        optimizer: optimizer whose state_dict() is saved.
        model_kwargs: constructor kwargs needed to rebuild the model at load time.
        filename: destination path for the checkpoint file.
    """
    state = {
        'epoch': epoch,
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'model_kwargs': model_kwargs,
    }
    # NOTE(review): removed the unconditional time.sleep(10) that blocked every
    # save for 10 seconds. If it was a workaround for slow shared storage,
    # reintroduce the wait at the call site where that context is known.
    torch.save(state, filename)