import torch
def get_grad(data, model, target_label=1):
    """Return the FGSM sign-gradient of the cross-entropy loss w.r.t. the input.

    The model is switched to train mode, but the batch-norm layers singled
    out below are kept in eval mode so the adversarial forward pass neither
    updates their running statistics nor normalizes with batch statistics.

    Args:
        data: 1-D array-like of waveform samples.
        model: network exposing the RawNet-style attributes referenced below
            (``first_bn``, ``block0..block5[0].bn1/bn2``, ``bn_before_gru``).
        target_label: class index the loss is computed against. The default 1
            reproduces the original hard-coded one-hot target ``[0., 1.]``
            (for a single sample CrossEntropyLoss gives the identical value).

    Returns:
        numpy array, same length as ``data``, holding sign(dL/dx) entries
        in {-1., 0., 1.}.
    """
    lossfun = torch.nn.CrossEntropyLoss()
    model.train()
    # Freeze exactly the batch-norm layers the original code listed.
    # NOTE(review): block0[0].bn1 is deliberately NOT frozen here, matching
    # the original — confirm that asymmetry is intended.
    bn_layers = [model.first_bn, model.block0[0].bn2, model.bn_before_gru]
    for blk in (model.block1, model.block2, model.block3, model.block4, model.block5):
        bn_layers.extend((blk[0].bn1, blk[0].bn2))
    for bn in bn_layers:
        bn.eval()

    # Use the model's own device instead of hard-coding .cuda(): identical
    # behavior on GPU, and no crash on CPU-only machines.
    device = next(model.parameters()).device
    adv_wav = torch.as_tensor(data, dtype=torch.float32).reshape(1, -1).to(device)

    # Clean prediction; no autograd graph is needed for this pass.
    with torch.no_grad():
        init_pred = torch.max(model(adv_wav), dim=1)[1].item()
    print("原始分类:", init_pred)

    # Hard class-index target; equivalent to the original soft one-hot
    # target and portable to torch versions before 1.10.
    targety = torch.tensor([target_label], device=device)

    perturbed_wav = adv_wav.detach().clone()
    perturbed_wav.requires_grad = True
    loss = lossfun(model(perturbed_wav), targety)
    model.zero_grad()
    loss.backward()
    # .grad, not the deprecated .grad.data accessor.
    sign_data_grad = perturbed_wav.grad.sign()
    return sign_data_grad.squeeze(0).cpu().numpy()

