import imp
from ahoproc_tools.io import *
from ahoproc_tools.interpolate import *
import multiprocessing as mp
from scipy.io import wavfile
import tempfile
import timeit
import glob
import os
import mindspore as ms
from mindspore import nn

def get_grads(model):
    """Collect gradients from `model` — not yet ported.

    TODO: MindSpore does not store gradients on the parameter tensors
    themselves (unlike torch's ``param.grad``); decide how to expose
    gradients here before implementing.
    """
    raise NotImplementedError

def make_optimizer(otype, params, lr, step_lr=None, lr_gamma=None,
                   adam_beta1=0.7, weight_decay=0.):
    """Build a MindSpore optimizer for `params`.

    `otype == 'rmsprop'` selects ``nn.RMSProp``; anything else falls back
    to ``nn.Adam`` (beta1 configurable, beta2 fixed at 0.9).

    `step_lr` and `lr_gamma` are accepted for API compatibility but are
    currently unused: the torch ``StepLR`` schedule has not been ported,
    so the second element of the returned tuple is always ``None``.

    Returns:
        (optimizer, None)
    """
    if otype == 'rmsprop':
        return nn.RMSProp(params, learning_rate=lr,
                          weight_decay=weight_decay), None
    return nn.Adam(params, learning_rate=lr, beta1=adam_beta1,
                   beta2=0.9, weight_decay=weight_decay), None

def KLD(mean_p, std_p, mean_g, std_g):
    """KL divergence between two normal distributions, element-wise.

    KL(N(mean_p, std_p^2) || N(mean_g, std_g^2))
        = log(std_g / std_p) + (var_p + (mean_p - mean_g)^2) / (2 * var_g) - 0.5

    (The original header comment stated log(var_g / var_p), which is twice
    the correct log term; the code below implements the std form.)

    A small epsilon is added to BOTH std_p (avoids division by zero in the
    ratio) and std_g (keeps log finite) — the original only guarded the
    std_g == 0 case.
    """
    var_p = std_p ** 2
    var_g = std_g ** 2
    num = var_p + (mean_p - mean_g) ** 2
    return (ms.ops.log((std_g + 1e-22) / (std_p + 1e-22))
            + (num / (2 * var_g + 1e-22)) - 0.5)

def compute_MAE(v_lf0, v_ref_lf0, mask):
    """Masked mean absolute error between two log-F0 tracks, in linear Hz.

    Inputs are exp()-ed before differencing (log-F0 -> F0), masked, and
    averaged over the time axis. Assumes shape (batch, time) — TODO confirm.

    Returns a per-sample MAE of shape (batch,).
    """
    # NOTE(review): the original used torch-style tensor.size(1); MindSpore
    # Tensor.size is an int property (not callable), so those calls would
    # raise TypeError — rewritten with .shape[1]. The leftover debug prints
    # were removed for the same reason.
    # Align the time dimension by cropping everything to the shortest track.
    if mask.shape[1] > v_lf0.shape[1]:
        mask = mask[:, :v_lf0.shape[1]]
        v_ref_lf0 = v_ref_lf0[:, :v_lf0.shape[1]]
    if mask.shape[1] < v_lf0.shape[1]:
        v_lf0 = v_lf0[:, :mask.shape[1]]
    abs_dif = ms.ops.abs(ms.ops.exp(v_lf0) - ms.ops.exp(v_ref_lf0)) * mask
    # Mean over voiced frames only (mask sums give the per-sample counts).
    return ms.ops.reduce_sum(abs_dif, axis=1) / ms.ops.reduce_sum(mask, axis=1)

def compute_accuracy(uv, ref_uv):
    """Per-sample voiced/unvoiced agreement rate between `uv` and `ref_uv`.

    The reference is cropped to `uv`'s time length when longer, reshaped to
    match, and the fraction of equal entries is averaged over the time axis.
    Returns a float32 tensor of shape (batch,).
    """
    # NOTE(review): replaced torch-style tensor.size(1) with .shape[1] —
    # MindSpore Tensor.size is an int property and is not callable.
    if ref_uv.shape[1] > uv.shape[1]:
        ref_uv = ref_uv[:, :uv.shape[1]]
    return ms.ops.reduce_mean(
        ms.ops.cast(ms.ops.equal(uv, ref_uv.view(uv.shape)), ms.float32),
        axis=1)

def convert_wav(wav):
    """Denormalize `wav` to int16, run ahocoder, return interpolated lf0.

    Returns (ilf0, uv, wav_path) on success, (None, None, None) when
    ahocoder produced no .lf0 file. The temp wav file is kept on disk
    (delete=False) because its path is returned to the caller.
    """
    ii16 = np.iinfo(np.int16)
    # NOTE(review): scaling by ii16.min (-32768) flips the waveform's sign;
    # presumably harmless for ahocoder's analysis — confirm it's intentional
    # (ii16.max would preserve polarity).
    wav = wav * ii16.min
    wav = wav.astype(np.int16)
    # Close the handle BEFORE ahocoder opens the file by name: otherwise
    # buffered samples may not be flushed yet (and Windows would refuse to
    # reopen the still-open file). The original leaked this handle.
    with tempfile.NamedTemporaryFile(delete=False) as f:
        fname = f.name
        wavfile.write(f, 16000, wav)
    # convert gwav to aco feats
    aco_name = wav2aco(fname)
    if not os.path.exists(aco_name + '.lf0'):
        # ahocoder can be random
        return None, None, None
    lf0 = read_aco_file(aco_name + '.lf0', (-1, 1))
    ilf0, uv = interpolation(lf0, -10000000000)
    return ilf0, uv, fname

def select_voiced(params):
    """Keep only the frames that are voiced in BOTH tracks.

    Args:
        params: tuple (lf0, uv, ref_lf0, ref_uv) of equally-shaped numpy
            arrays; uv/ref_uv are voiced/unvoiced masks.

    Returns:
        (v_lf0, v_ref_lf0) restricted to jointly-voiced frames, or a pair
        of empty lists when no frame is voiced in both.
    """
    lf0, uv, ref_lf0, ref_uv = params
    joint = uv * ref_uv
    # No overlap between the two voiced regions -> nothing to compare.
    if np.sum(joint) == 0:
        return [], []
    keep = joint > 0
    return lf0[keep], ref_lf0[keep]


class F0Evaluator(object):
    """Evaluator for F0 predictions against a ground-truth directory.

    Not yet ported from the original torch implementation — constructing
    an instance raises NotImplementedError.
    """

    def __init__(self, f0_gtruth_dir=None,  num_proc=30, cuda=False):
        # Port pending; signature kept for caller compatibility.
        raise NotImplementedError