# The following piece of code was adapted from https://github.com/kaituoxu/Conv-TasNet
# released under the MIT License.
# Author: Kaituo XU
# Created on 2018/12

from itertools import permutations
import mindspore
import mindspore.ops as ops
import numpy as np
from mindspore import Tensor
import torch.nn.functional as F

# Small constant to avoid division by zero and log(0) in the SI-SNR computation.
EPS = 1e-8


def cal_loss(source, estimate_source, source_lengths):
    """
    Compute the negative SI-SNR loss with permutation-invariant training (PIT).

    Args:
        source: [B, C, T], B is batch size
        estimate_source: [B, C, T]
        source_lengths: [B]
    Returns:
        loss: scalar Tensor, negated mean of the best per-utterance SI-SNR
            (minimizing this loss maximizes SI-SNR)
        max_snr: [B, 1], best SI-SNR per utterance
        estimate_source: [B, C, T] (note: masked in place by cal_si_snr_with_pit)
        reorder_estimate_source: [B, C, T], estimates permuted to best match source
    """
    max_snr, perms, max_snr_idx = cal_si_snr_with_pit(
        source, estimate_source, source_lengths)
    # Negate: training minimizes the loss, which maximizes SI-SNR.
    mean = ops.ReduceMean()
    loss = 0 - mean(max_snr)

    reorder_estimate_source = reorder_source(
        estimate_source, perms, max_snr_idx)
    return loss, max_snr, estimate_source, reorder_estimate_source


def cal_si_snr_with_pit(source, estimate_source, source_lengths):
    """
    Calculate SI-SNR with PIT (permutation-invariant training).

    Args:
        source: [B, C, T]
        estimate_source: [B, C, T]
        source_lengths: [B], each item is between [0, T]
    Returns:
        max_snr: [B, 1], best SI-SNR per utterance, averaged over the C sources
        perms: [C!, C], all permutations of the C sources
        max_snr_idx: [B], index into perms of the best permutation per utterance
    """
    B, C, T = source.shape
    # Mask padding positions along T so padded samples do not affect the loss.
    mask = get_mask(source, source_lengths)
    estimate_source *= mask

    # Step 1. Zero-mean normalization over the valid (unpadded) samples only.
    cast = ops.Cast()
    num_samples = cast(source_lengths.view(-1, 1, 1), mindspore.float32)  # [B, 1, 1]
    reduce_sum_keep = ops.ReduceSum(keep_dims=True)
    reduce_sum = ops.ReduceSum(keep_dims=False)
    mean_target = reduce_sum_keep(source, 2) / num_samples
    mean_estimate = reduce_sum_keep(estimate_source, 2) / num_samples
    zero_mean_target = source - mean_target
    zero_mean_estimate = estimate_source - mean_estimate
    # Re-mask: subtracting the mean made padded positions non-zero.
    zero_mean_target *= mask
    zero_mean_estimate *= mask

    # Step 2. Pairwise SI-SNR between every (estimate, target) pair,
    # reshaped to broadcast over the C x C pairs.
    expand_dims = ops.ExpandDims()
    s_target = expand_dims(zero_mean_target, 1)      # [B, 1, C, T]
    s_estimate = expand_dims(zero_mean_estimate, 2)  # [B, C, 1, T]
    # s_target_proj = <s', s>s / ||s||^2
    pair_wise_dot = reduce_sum_keep(s_estimate * s_target, 3)       # [B, C, C, 1]
    s_target_energy = reduce_sum_keep(s_target ** 2, 3) + EPS       # [B, 1, C, 1]
    pair_wise_proj = pair_wise_dot * s_target / s_target_energy     # [B, C, C, T]
    # e_noise = s' - s_target_proj
    e_noise = s_estimate - pair_wise_proj                           # [B, C, C, T]
    # SI-SNR = 10 * log_10(||s_target||^2 / ||e_noise||^2)
    pair_wise_si_snr = reduce_sum(
        pair_wise_proj ** 2, 3) / (reduce_sum(e_noise ** 2, 3) + EPS)
    log = ops.Log()
    # MindSpore has no Log10 op here; use log(x) / log(10).
    ln10 = log(Tensor(np.array([10.0]), mindspore.float32))
    pair_wise_si_snr = 10 * log(pair_wise_si_snr + EPS) / ln10      # [B, C, C]

    # Step 3. Pick the permutation with the highest total SI-SNR.
    # permutations, [C!, C]
    perms_np = np.array(list(permutations(range(C))), dtype=np.int64)
    perms = Tensor(perms_np, dtype=mindspore.int64)
    # One-hot encoding of each permutation, [C!, C, C]:
    # one_hot[p, i, perms_np[p, i]] = 1. Built generically for any C
    # (the previous hard-coded ScatterNd indices were only valid for C == 2,
    # and its shape=(1, 1, 1) put three of the four indices out of bounds).
    num_perms = perms_np.shape[0]
    one_hot_np = np.zeros((num_perms, C, C), dtype=np.float32)
    rows = np.arange(C)
    for p in range(num_perms):
        one_hot_np[p, rows, perms_np[p]] = 1.0
    # snr_set[b, p] = sum_ij pair_wise_si_snr[b, i, j] * one_hot[p, i, j]
    # (einsum 'bij,pij->bp' expressed as a matmul).
    matmul = ops.MatMul()
    transpose = ops.Transpose()
    perms_one_hot = transpose(
        Tensor(one_hot_np).view(num_perms, -1), (1, 0))             # [C*C, C!]
    snr_set = matmul(pair_wise_si_snr.view(B, -1), perms_one_hot)   # [B, C!]
    max_snr_idx = ops.Argmax(axis=1, output_type=mindspore.int32)(snr_set)  # [B]
    argmax = ops.ArgMaxWithValue(axis=1, keep_dims=True)
    _, max_snr = argmax(snr_set)                                    # [B, 1]
    max_snr /= C  # average SI-SNR over the C sources
    return max_snr, perms, max_snr_idx


def reorder_source(source, perms, max_snr_idx):
    """
    Permute the estimated sources into the order that best matches the targets.

    Args:
        source: [B, C, T]
        perms: [C!, C], permutations
        max_snr_idx: [B], each item is between [0, C!)
    Returns:
        reorder_source: [B, C, T], source with channels permuted per utterance
    """
    batch_size, num_sources, *_ = source.shape
    # [B, C]: for each utterance, the permutation achieving the max SI-SNR.
    best_perm = perms[max_snr_idx, :]
    reorder_source = ops.ZerosLike()(source)
    # Copy each channel from the position the best permutation assigns it.
    for b in range(batch_size):
        for c in range(num_sources):
            reorder_source[b, c] = source[b, best_perm[b][c]]
    return reorder_source


def get_mask(source, source_lengths):
    """
    Build a binary mask that zeroes padded positions along the time axis.

    Args:
        source: [B, C, T]
        source_lengths: [B]
    Returns:
        mask: [B, 1, T], 1.0 for valid samples, 0.0 past each utterance's length
    """
    batch_size, _, time_steps = source.shape
    mask = ops.Ones()((batch_size, 1, time_steps), mindspore.float32)
    # Zero out everything beyond each utterance's valid length.
    for idx in range(batch_size):
        mask[idx, :, source_lengths[idx]:] = 0
    return mask

if __name__ == "__main__":
    from mindspore import context
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    print("______________________ test cal_loss _______________________")
    B, C, T = 4, 1, 32000
    source = Tensor(np.random.randn(B, C, T), dtype=mindspore.float32)
    # Lengths must be valid sample counts in [0, T]. The previous
    # np.random.randn(4) cast to int32 produced lengths of 0 or negative
    # values, which made get_mask's slicing meaningless.
    source_lengths = Tensor(np.random.randint(T // 2, T + 1, size=B),
                            dtype=mindspore.int32)
    estimate_source = Tensor(np.random.randn(B, C, T), dtype=mindspore.float32)
    loss, max_snr, estimate_source, reorder_estimate_source = \
        cal_loss(source, estimate_source, source_lengths)
    print(loss.shape)
    print(max_snr.shape)
    print(estimate_source.shape)
    print(reorder_estimate_source.shape)