from itertools import permutations

import torch

# BUG FIX: torch.seed() accepts no arguments (it re-seeds from entropy and
# returns the new seed); torch.manual_seed(n) is the call that fixes the
# global RNG state for reproducible torch.randn draws below.
torch.manual_seed(878373947641000)

def get_mask(source, source_lengths):
    """
    Build a binary mask that zeroes the padded tail positions along T.

    Args:
        source: [B, C, T] tensor; only its batch size and length T are read.
        source_lengths: [B] or [B, 1] tensor of valid lengths per utterance.
            (The original only accepted [B, 1]; flattening accepts both.)
    Returns:
        mask: [B, 1, T] tensor of the same dtype/device as `source`,
            with 1 at positions < length and 0 at positions >= length.
    """
    B, _, T = source.size()
    lengths = source_lengths.view(-1)  # accept [B] or [B, 1]
    mask = source.new_ones((B, 1, T))
    for i in range(B):
        # int() so plain Python slicing is used regardless of tensor dtype
        mask[i, :, int(lengths[i]):] = 0
    return mask


# Toy inputs: batch B=2, C=2 sources, T=2 samples each.
source = torch.randn(2, 2, 2)
estimate_source = torch.randn(2, 2, 2)
# Every utterance is full length: [B, 1] filled with T.
# (Fixed a duplicated `source_lengths = source_lengths = ...` assignment.)
source_lengths = torch.full(
    (estimate_source.size(0), 1), estimate_source.size(2)
).to("cpu")


# Sanity check: target and estimate must be the same shape.
assert source.size() == estimate_source.size()
B, C, T = source.size()
# mask padding position along T
mask = get_mask(source, source_lengths)
estimate_source *= mask

# Step 1. Zero-mean normalization over the valid (unpadded) samples.
# Means are computed from the per-utterance valid lengths, not T, so
# padding does not bias them.
valid_counts = source_lengths.view(-1, 1, 1).float()  # [B, 1, 1]
zero_mean_target = source - torch.sum(source, dim=2, keepdim=True) / valid_counts
zero_mean_estimate = (
    estimate_source - torch.sum(estimate_source, dim=2, keepdim=True) / valid_counts
)
# Re-zero the padded positions along T (subtracting the mean un-zeroed them).
zero_mean_target = zero_mean_target * mask
zero_mean_estimate = zero_mean_estimate * mask

# Step 2. SI-SNR with PIT
# reshape to use broadcast: pairing every estimate channel with every target
s_target = torch.unsqueeze(zero_mean_target, dim=1)  # [B, 1, C, T]
s_estimate = torch.unsqueeze(zero_mean_estimate, dim=2)  # [B, C, 1, T]
print(s_target)
print(s_estimate)

# Energy of each target channel; used as the numerator of the ratio below.
s_target_square = torch.sum(s_target**2, dim=3, keepdim=True)  # [B,1,C,1]
print(f"s target {s_target_square.shape}")
print(s_target_square)

# Per-pair scaling factor.
# NOTE(review): alpha divides by the ESTIMATE energy here; the usual SI-SNR
# projection divides the <estimate, target> dot product by the TARGET
# energy — confirm this variant is intended.
alpha = torch.sum(s_estimate * s_target, dim=3, keepdim=True) / torch.sum(
    s_estimate**2, dim=3, keepdim=True
)  # [B,C,C,1]
print(f"alpha shape {alpha.shape}")

# Residual between each scaled estimate and each target (broadcast to [B,C,C,T]).
denominator = alpha * s_estimate - s_target.transpose(1, 2)
denominator = torch.sum(denominator**2, dim=3, keepdim=True)  # [B,C,C,1]

# Energy ratio for every (estimate, target) pairing.  [B,C,C,1]
pair_wise_si_snr = s_target_square / denominator

pair_wise_si_snr = 10 * torch.log10(pair_wise_si_snr)  # convert ratio to dB
pair_wise_si_snr = pair_wise_si_snr.squeeze(3)  # [B,C,C]

print(f"denominator {denominator.shape}")

# Get max_snr of each utterance by searching over all speaker permutations
# permutations, [C!, C]
perms = source.new_tensor(list(permutations(range(C))), dtype=torch.long)
# one-hot, [C!, C, C]: perms_one_hot[p, i, perms[p, i]] = 1
index = torch.unsqueeze(perms, 2)
perms_one_hot = source.new_zeros((*perms.size(), C)).scatter_(2, index, 1)
# [B, C!] <- [B, C, C] einsum [C!, C, C], SI-SNR sum of each permutation
snr_set = torch.einsum("bij,pij->bp", [pair_wise_si_snr, perms_one_hot])
max_snr_idx = torch.argmax(snr_set, dim=1)  # [B] index of best permutation (not used below)
# max_snr = torch.gather(snr_set, 1, max_snr_idx.view(-1, 1))  # [B, 1]
max_snr, _ = torch.max(snr_set, dim=1, keepdim=True)  # [B, 1] best summed SI-SNR
max_snr /= C  # average over the C sources
