from ..datasets import *
from ..utils import *
from .ops import *
from scipy.io import wavfile
import multiprocessing as mp
import numpy as np
import timeit
import random
from random import shuffle
from tensorboardX import SummaryWriter
from .generator import *
from .discriminator import *
from .core import *
import json
import os
from scipy import signal
import mindspore as ms
from mindspore.common.initializer import initializer, XavierUniform, Normal
from segan.datasets import collate_fn

def weights_init(m):
    """Per-cell weight init for vanilla SEGAN, applied via apply().

    Conv1DResBlock cells get N(0, 0.02) on their conv weights, plain Conv1d
    cells get N(0, 0.02) weights and zero bias, Linear cells get Xavier
    uniform. Cells matching none of the patterns are left untouched.
    """
    classname = m.__class__.__name__
    if classname.find('Conv1DResBlock') != -1:
        print('Initializing weights of convresblock to 0.0, 0.02')
        # Mirror wsegan_weights_init: the parameter loop belongs to this
        # branch only.  set_data() updates the Parameter in place; the
        # original `p = initializer(...)` merely rebound the loop variable
        # and had no effect.
        for k, p in m.parameters_and_names():
            if 'weight' in k and 'conv' in k:
                p.set_data(initializer(Normal(0.02, 0.0), p.shape))
    elif classname.find('Conv1d') != -1:
        print('Initializing weight to 0.0 0.02 for module ', m)
        # assigning the initializer result directly would replace the
        # Parameter with a plain Tensor; set_data keeps the Parameter
        m.weight.set_data(initializer(Normal(0.02, 0.0), m.weight.shape))
        if hasattr(m, 'bias') and m.bias is not None:
            print('bias to 0 for module: ', m)
            m.bias.set_data(ops.zeros_like(m.bias))
    elif classname.find('Linear') != -1:
        print('Initializing FC weight to xavier uniform')
        m.weight.set_data(initializer(XavierUniform(), m.weight.shape))

def wsegan_weights_init(m):
    """Per-cell Xavier-uniform init for WSEGAN, applied via apply().

    Conv res-blocks have every conv weight re-drawn; Conv1d,
    ConvTransposed1d and Linear cells have their main weight re-drawn.
    Other cells are left untouched.
    """
    classname = m.__class__.__name__
    if classname.find('Conv1DResBlock') != -1:
        print('Initializing weights of convresblock to 0.0, 0.02')
        for k, p in m.parameters_and_names():
            if 'weight' in k and 'conv' in k:
                # set_data() updates the Parameter in place; the original
                # `p = initializer(...)` only rebound the loop variable
                # and therefore never changed the network
                p.set_data(initializer(XavierUniform(), p.shape))
    elif classname.find('Conv1d') != -1:
        print('Initializing weight to tXU for module: ', m)
        m.weight.set_data(initializer(XavierUniform(), m.weight.shape))
    elif classname.find('ConvTransposed1d') != -1:
        print('Initializing weight to XU for module :', m)
        m.weight.set_data(initializer(XavierUniform(), m.weight.shape))
    elif classname.find('Linear') != -1:
        print('Initializing FC weight to XU')
        m.weight.set_data(initializer(XavierUniform(), m.weight.shape))
    
def z_fropout(m):
    """Keep dropout stochastic at inference: dropout cells stay in train
    mode, every other cell is switched to inference mode."""
    name = m.__class__.__name__
    if 'Dropout' in name:
        m.set_train()
    else:
        # PyTorch's eval() equivalent: MindSpore just flips the train flag
        m.set_train(False)

def apply(m, fn):
    """Post-order traversal over a cell tree: visit every sub-cell of *m*
    recursively, then apply *fn* to *m* itself."""
    for child in m.cells():
        apply(child, fn)
    fn(m)

# WithLossCellG/WithLossCellD below compute the same results as infer_G and
# infer_D respectively, wrapped as loss cells for TrainOneStepCell.
class WithLossCellG(nn.Cell):
    """Loss cell for the generator: G's output must fool D into predicting
    'real' (all-ones target)."""

    def __init__(self, netD, netG, loss_fn):
        super(WithLossCellG, self).__init__(auto_prefix=True)
        self.netD = netD
        self.netG = netG
        self.loss_fn = loss_fn

    def construct(self, noisy_wav, z=None, ret_hid=False):
        # enhance the noisy waveform, then score it with the discriminator
        fake_wav = self.netG(noisy_wav, z=z, ret_hid=ret_hid)
        d_out = self.netD(fake_wav)
        # adversarial target: pretend the enhanced audio is real
        real_target = ops.OnesLike()(d_out)
        return self.loss_fn(d_out, real_target)

class WithLossCellD(nn.Cell):
    """Loss cell for the discriminator: real (clean, noisy) pairs should
    score 1, fake (G(noisy), noisy) pairs should score 0."""

    def __init__(self, netD, netG, loss_fn):
        super(WithLossCellD, self).__init__(auto_prefix=True)
        self.netD = netD
        self.netG = netG
        self.loss_fn = loss_fn

    # the "real data" is clean_wav; pairs are concatenated on the channel
    # axis before each D inference
    def construct(self, clean_wav, noisy_wav, z=None, ret_hid=False):
        # real branch
        real_pair = ms.ops.concat((clean_wav, noisy_wav), axis=1)
        d_real = self.netD(real_pair)
        loss_real = self.loss_fn(d_real, ops.OnesLike()(d_real))

        # fake branch: enhance first, then judge
        enhanced = self.netG(noisy_wav, z=z, ret_hid=ret_hid)
        fake_pair = ms.ops.concat((enhanced, noisy_wav), axis=1)
        d_fake = self.netD(fake_pair)
        loss_fake = self.loss_fn(d_fake, ops.ZerosLike()(d_fake))

        return loss_real + loss_fake

class MSEGAN(nn.Cell):
    """Runs one discriminator train step followed by one generator train
    step and returns their scalar (mean) losses."""

    def __init__(self, oneStepD, oneStepG):
        super(MSEGAN, self).__init__(auto_prefix=True)
        self.oneStepD = oneStepD
        self.oneStepG = oneStepG

    def construct(self, clean_wav, noisy_wav):
        # D first, then G -- each one-step cell updates its own network
        d_flat = self.oneStepD(clean_wav, noisy_wav).view(-1)
        g_flat = self.oneStepG(noisy_wav).view(-1)
        return d_flat.mean(), g_flat.mean()
    


class SEGAN(Model):

    def __init__(self, opts, name='SEGAN',
                 generator=None,
                 discriminator=None):
        # Build the full SEGAN: a Generator (speech enhancer) and a
        # Discriminator that judges 2-channel (wave, reference) pairs.
        # opts: hyper-parameter namespace (architecture widths, poolings,
        #       z options, save_path, preemph, ...).
        # generator / discriminator: optional pre-built networks; when None,
        #       they are constructed from opts.
        super(SEGAN, self).__init__(name)
        self.save_path = opts.save_path
        self.preemph = opts.preemph
        self.reg_loss = nn.L1Loss # hard-coded for now
        if generator is None:
            # Build G and D
            self.G = Generator(1,
                               opts.genc_fmaps,
                               opts.gkwidth,
                               opts.genc_poolings,
                               opts.gdec_fmaps,
                               opts.gdec_kwidth,
                               opts.gdec_poolings,
                               z_dim=opts.z_dim,
                               no_z=opts.no_z,
                               skip=(not opts.no_skip),
                               bias=opts.bias,
                               skip_init=opts.skip_init,
                               skip_type=opts.skip_type,
                               skip_merge=opts.skip_merge,
                               skip_kwidth=opts.skip_kwidth)
        else:
            self.G = generator
        # recursively initialize every sub-cell of G
        apply(self.G, weights_init)
        print('Generator: ', self.G)

        if discriminator is None:
            # D's kernel width falls back to G's when not given explicitly
            dkwidth = opts.gkwidth if opts.dkwidth is None else opts.dkwidth
            self.D = Discriminator(2, opts.denc_fmaps, dkwidth,
                                   poolings=opts.denc_poolings,
                                   pool_type=opts.dpool_type,
                                   pool_slen=opts.dpool_slen, 
                                   norm_type=opts.dnorm_type,
                                   phase_shift=opts.phase_shift,
                                   sinc_conv=opts.sinc_conv)
        else:
            self.D = discriminator
        # recursively initialize every sub-cell of D
        apply(self.D, weights_init)
        print('Discriminator: ', self.D)
    
    def generate(self, inwav, z = None, device='cpu'):
        # Enhance an arbitrary-length waveform: slice it into 16384-sample
        # chunks (zero-padding the tail), run G chunk by chunk, concatenate
        # the outputs and undo the pre-emphasis filter.
        # Returns (enhanced_signal, deepest_encoder_activation).
        # NOTE(review): if inwav.shape[2] == 0 the loop never runs and g_c is
        # referenced unbound at the return -- confirm callers guarantee
        # non-empty input.  inwav is assumed to be (1, 1, T) -- TODO confirm.
        self.G.set_train(False)
        N = 16384
        x = np.zeros((1, 1, N))
        c_res = None
        slice_idx = ms.numpy.zeros(1)
        for beg_i in range(0, inwav.shape[2], N):
            if inwav.shape[2] - beg_i < N:
                # last (short) chunk: remember how much zero-padding we add
                length = inwav.shape[2] - beg_i
                pad = N - length
            else:
                length = N
                pad = 0
            if pad > 0:
                x[0, 0] = ms.ops.concat((inwav[0, 0, beg_i:beg_i + length], 
                                         ms.numpy.zeros(pad)), axis=0)
            else:
                x[0, 0] = inwav[0, 0, beg_i:beg_i + length]
            if isinstance(x, np.ndarray):
                # NOTE(review): calling numpy astype with a MindSpore dtype
                # looks wrong -- presumably ms.Tensor(x, ms.float32) was
                # intended; verify this path actually runs
                x = x.astype(ms.float32)
            canvas_w, hall = self.infer_G(x, z=z, ret_hid=True)
            # pick the deepest encoder feature map: highest 'enc_<i>' key
            nums = []
            for k in hall.keys():
                if 'enc' in k and 'zc' not in k:
                    nums.append(int(k.split('_')[1]))
            g_c = hall['enc_{}'.format(max(nums))]
            if z is None and hasattr(self.G, 'z'):
                # if z was created inside G as first inference
                z = self.G.z
            if pad > 0:
                # drop the zero-padded tail of the final chunk
                canvas_w = canvas_w[0, 0, :-pad]
            canvas_w = canvas_w.asnumpy().squeeze()
            if c_res is None:
                c_res = canvas_w
            else:
                c_res = np.concatenate((c_res, canvas_w))
            slice_idx += 1
        #de-emph
        c_res = de_emphasize(c_res, self.preemph)
        return c_res, g_c

    def discriminate(self, cwav, nwav):
        """Score a (clean, noisy) pair with D in inference mode and return
        D's verdict (the auxiliary output is discarded)."""
        self.D.set_train(False)
        pair = ms.ops.concat((cwav, nwav), axis=1)
        verdict, _ = self.D(pair)
        return verdict

    def infer_G(self, nwav, cwav=None, z=None, ret_hid=False):
        """Forward *nwav* through G. cwav is accepted for API symmetry but
        unused. Returns Genh, or (Genh, hidden_dict) when ret_hid is True
        (G itself already returns the matching shape)."""
        return self.G(nwav, z=z, ret_hid=ret_hid)

    def infer_D(self, x_, ref):
        """Score the pair (x_, ref) with D; the two waves are stacked on the
        channel axis before the forward pass."""
        stacked = ms.ops.concat((x_, ref), axis=1)
        return self.D(stacked)

    def gen_train_samples(self, clean_samples, noisy_samples, z_sample, iteration=None):
        """Dump training sample wavs to self.save_path at 16 kHz.

        Writes sample_{iteration}-{m}.wav for every enhanced waveform and,
        only on the first call, the reference gtruth_{m}.wav, noisy_{m}.wav
        and dif_{m}.wav files.
        """
        if z_sample is not None:
            canvas_w = self.infer_G(noisy_samples, clean_samples, z=z_sample)
        else:
            canvas_w = self.infer_G(noisy_samples, clean_samples)
        sample_dif = noisy_samples - clean_samples
        # sample wavs.  MindSpore tensors use .shape / .asnumpy(); the
        # original torch-style .size(0) / .cpu().data.numpy() would fail.
        for m in range(noisy_samples.shape[0]):
            m_canvas = de_emphasize(canvas_w[m, 0].asnumpy(),
                                    self.preemph)
            print('w{} max: {} min: {}'.format(m,
                                               m_canvas.max(),
                                               m_canvas.min()))
            wavfile.write(os.path.join(self.save_path,
                                       'sample_{}-'
                                       '{}.wav'.format(iteration,
                                                       m)),
                          int(16e3), m_canvas)
            m_clean = de_emphasize(clean_samples[m, 0].asnumpy(),
                                   self.preemph)
            m_noisy = de_emphasize(noisy_samples[m, 0].asnumpy(),
                                   self.preemph)
            m_dif = de_emphasize(sample_dif[m, 0].asnumpy(),
                                 self.preemph)
            m_gtruth_path = os.path.join(self.save_path,
                                         'gtruth_{}.wav'.format(m))
            if not os.path.exists(m_gtruth_path):
                # reference wavs are written only once per sample index
                wavfile.write(m_gtruth_path, int(16e3), m_clean)
                wavfile.write(os.path.join(self.save_path,
                                           'noisy_{}.wav'.format(m)),
                              int(16e3), m_noisy)
                wavfile.write(os.path.join(self.save_path,
                                           'dif_{}.wav'.format(m)),
                              int(16e3), m_dif)
        
    def build_optimizers(self, opts):
        """Create and return (Gopt, Dopt) according to opts.opt
        ('rmsprop' or 'adam'); raises ValueError for anything else."""
        if opts.opt == 'rmsprop':
            g_opt = nn.RMSProp(self.G.get_parameters(), learning_rate=opts.g_lr)
            d_opt = nn.RMSProp(self.D.get_parameters(), learning_rate=opts.d_lr)
        elif opts.opt == 'adam':
            # WGAN-style betas: (0, 0.9)
            g_opt = nn.Adam(self.G.get_parameters(), learning_rate=opts.g_lr,
                            beta1=0, beta2=0.9)
            d_opt = nn.Adam(self.D.get_parameters(), learning_rate=opts.d_lr,
                            beta1=0, beta2=0.9)
        else:
            raise ValueError('Unrecognized optimizer {}'.format(opts.opt))
        return g_opt, d_opt

    def train(self, opts, dloader, criterion, l1_init, l1_dec_step,
              l1_dec_epoch, log_freq, va_dloader=None,
              device='cpu'):
        """Train the SEGAN adversarially.

        Args:
            opts: hyper-parameter namespace (opt, g_lr, d_lr, epoch,
                batch_size, patience, save_path, no_train_gen, ...).
            dloader: training source yielding (uttname, clean, noisy,
                slice_idx); must expose source_len and batch_size.
            criterion: adversarial loss applied by the WithLossCell wrappers.
            l1_init / l1_dec_step / l1_dec_epoch: schedule for the auxiliary
                L1 weight, decayed from epoch l1_dec_epoch onwards.
            log_freq: currently unused; logging happens every 5 batches.
            va_dloader: optional validation loader (validation is disabled
                below, see the TODO at the end).
            device: kept for API compatibility; MindSpore picks the device
                globally, there is no per-tensor .to(device).
        """
        # create tensorboard writer
        self.writer = SummaryWriter(os.path.join(self.save_path, 'train'))

        # Build the optimizers and attach them to the models so that they
        # are saved altogether in ckpts
        Gopt, Dopt = self.build_optimizers(opts)
        self.G.optim = Gopt
        self.D.optim = Dopt

        # wrap D and G in loss cells + one-step train cells; gradients are
        # handled inside TrainOneStepCell (no zero_grad/step in MindSpore)
        netD_with_criterion = WithLossCellD(self.D, self.G, criterion)
        netG_with_criterion = WithLossCellG(self.D, self.G, criterion)
        myTrainOneStepCellD = nn.TrainOneStepCell(netD_with_criterion, Dopt)
        myTrainOneStepCellG = nn.TrainOneStepCell(netG_with_criterion, Gopt)

        self.msegan = MSEGAN(myTrainOneStepCellD, myTrainOneStepCellG)

        # Build savers for end of epoch, storing up to 3 epochs each
        # (only constructed here; EOE saving is currently disabled)
        eoe_g_saver = Saver(self.G, opts.save_path, max_ckpts=3,
                            optimizer=self.G.optim, prefix='EOE_G-')
        eoe_d_saver = Saver(self.D, opts.save_path, max_ckpts=3,
                            optimizer=self.D.optim, prefix='EOE_D-')
        num_batches = dloader.source_len // dloader.batch_size
        l1_weight = l1_init
        iteration = 1
        timings = []
        # retained for the (disabled) validation loop below
        evals = {}
        noisy_evals = {}
        noisy_samples = None
        clean_samples = None
        z_sample = None
        patience = opts.patience
        best_val_obj = 0
        # accumulator for exponential avg of valid curve
        acum_val_obj = 0
        # make label tensor (device placement is global in MindSpore)
        label = ms.numpy.ones(opts.batch_size)
        G_losses = []
        D_losses = []

        for epoch in range(1, opts.epoch + 1):
            beg_t = timeit.default_timer()
            self.msegan.set_train()
            for bidx, batch in enumerate(dloader, start=1):
                # decay the auxiliary L1 weight once l1_dec_epoch is reached
                if epoch >= l1_dec_epoch:
                    if l1_weight > 0:
                        l1_weight -= l1_dec_step
                        # ensure it is 0 if it goes < 0
                        l1_weight = max(0, l1_weight)
                sample = batch
                if len(sample) == 4:
                    uttname, clean, noisy, slice_idx = batch
                else:
                    raise ValueError('Returned {} elements per '
                                     'sample?'.format(len(sample)))
                # add the channel dimension: (B, T) -> (B, 1, T)
                clean = ms.ops.expand_dims(clean, axis=1)
                noisy = ms.ops.expand_dims(noisy, axis=1)
                if noisy_samples is None:
                    # keep the first batch around for periodic sample dumps
                    noisy_samples = noisy[:20, :, :]
                    clean_samples = clean[:20, :, :]
                print("------------------------------------------------")
                print(f"clean shape: {clean.shape}, noisy shape: {noisy.shape}")
                # one D update then one G update, returning the mean losses
                netD_loss, netG_loss = self.msegan(clean, noisy)
                end_t = timeit.default_timer()
                timings.append(end_t - beg_t)
                beg_t = timeit.default_timer()
                D_losses.append(netD_loss.asnumpy())
                G_losses.append(netG_loss.asnumpy())
                if bidx % 5 == 0 or bidx >= num_batches:
                    # was 'epoch + 1': off by one since the loop starts at 1
                    print('[%2d/%d][%3d/%d]   Loss_D:%7.4f  Loss_G:%7.4f' % (
                        epoch, iteration, bidx, num_batches,
                        netD_loss.asnumpy(), netG_loss.asnumpy()))

                    # get D and G weights and plot their norms by layer and
                    # global
                    def model_weights_norm(model, total_name):
                        total_GW_norm = 0
                        # parameters_and_names() is the MindSpore
                        # counterpart of torch's named_parameters()
                        for k, v in model.parameters_and_names():
                            if 'weight' in k:
                                W = v.data
                                # NOTE(review): ms.ops.LpNorm normally takes
                                # an axis argument -- confirm this call runs
                                norm_op = ms.ops.LpNorm()
                                W_norm = norm_op(W)
                                self.writer.add_scalar('{}_Wnorm'.format(k),
                                                       W_norm,
                                                       iteration)
                                total_GW_norm += W_norm
                        self.writer.add_scalar('{}_Wnorm'.format(total_name),
                                               total_GW_norm,
                                               iteration)
                    model_weights_norm(self.G, 'Gtotal')
                    model_weights_norm(self.D, 'Dtotal')
                    if not opts.no_train_gen:
                        self.gen_train_samples(clean_samples, noisy_samples,
                                               z_sample,
                                               iteration=iteration)
                iteration += 1

            # TODO: per-epoch validation on va_dloader (PESQ/COVL/SSNR with
            # patience-based early stopping) and EOE checkpointing through
            # eoe_g_saver / eoe_d_saver are disabled; re-enable once
            # evaluate() is verified under MindSpore.


    def evaluate(self, opts, dloader, log_freq, do_noisy=False, max_samples=1, device='cpu'):
        """ Objective evaluation with PESQ, SSNR, COVL, CBAK and CSIG.

        Runs G over up to max_samples batches, de-emphasizes the outputs and
        scores them in a worker pool via composite_helper. Returns a dict of
        metric lists, plus a second dict for the raw noisy input when
        do_noisy is True.
        """
        # MindSpore cells have no .eval(); set_train(False) is the
        # equivalent (see generate()/discriminate() above)
        self.G.set_train(False)
        self.D.set_train(False)
        evals = {'pesq': [], 'ssnr': [], 'csig': [],
                 'cbak': [], 'covl': []}
        if do_noisy:
            noisy_evals = {'pesq': [], 'ssnr': [], 'csig': [],
                           'cbak': [], 'covl': []}
        if not hasattr(self, 'pool'):
            # lazily build the metric worker pool once per SEGAN instance
            self.pool = mp.Pool(opts.eval_workers)
        # going over the dataset ONCE (at most max_samples batches)
        for bidx, batch in enumerate(dloader, start=1):
            batch = collate_fn(batch)
            sample = batch
            if len(sample) == 4:
                uttname, clean, noisy, slice_idx = batch
            else:
                raise ValueError('Returned {} elements per '
                                 'sample?'.format(len(sample)))
            # (B, T) -> (B, 1, T); torch's unsqueeze is expand_dims here
            noisy = ms.ops.expand_dims(noisy, axis=1)
            Genh = ms.ops.expand_dims(self.infer_G(noisy), axis=1)
            # Tensor.asnumpy() is the MindSpore API (was .as_numpy())
            clean_npy = clean.asnumpy()
            Genh_npy = Genh.asnumpy()
            clean_npy = np.apply_along_axis(de_emphasize, 0, clean_npy,
                                            self.preemph)
            Genh_npy = np.apply_along_axis(de_emphasize, 0, Genh_npy,
                                           self.preemph)
            beg_t = timeit.default_timer()
            if do_noisy:
                noisy_npy = noisy.asnumpy()
                noisy_npy = np.apply_along_axis(de_emphasize, 0, noisy_npy,
                                                self.preemph)
                args = [(clean_npy[i], Genh_npy[i], noisy_npy[i])
                        for i in range(clean.shape[0])]
            else:
                args = [(clean_npy[i], Genh_npy[i], None)
                        for i in range(clean.shape[0])]
            map_ret = self.pool.map(composite_helper, args)
            end_t = timeit.default_timer()
            print('Time to process eval with {} samples '
                  ': {} s'.format(clean.shape[0], end_t - beg_t))
            if bidx >= max_samples:
                break

        def fill_ret_dict(ret_dict, in_dict):
            # append every metric of the last mapped batch into the lists
            for k, v in in_dict.items():
                ret_dict[k].append(v)

        if do_noisy:
            for eval_, noisy_eval_ in map_ret:
                fill_ret_dict(evals, eval_)
                fill_ret_dict(noisy_evals, noisy_eval_)
            return evals, noisy_evals
        else:
            for eval_ in map_ret:
                fill_ret_dict(evals, eval_)
            return evals