import json
import multiprocessing as mp
import os
import random
import timeit
from random import shuffle

import mindspore as ms
import numpy as np
from mindspore import nn, ops
from mindspore.common.initializer import Normal, XavierUniform, initializer
from scipy import signal
from scipy.io import wavfile
from segan.datasets import collate_fn
from tensorboardX import SummaryWriter
from tqdm import tqdm

from ..datasets import *
from ..utils import *
from .core import *
from .discriminator import *
from .generator import *
from .ops import *


def weights_init(m):
    """Initialize the weights of cell `m` according to its layer type.

    - Conv1DResBlock: every conv weight parameter gets N(mean=0.0, sigma=0.02).
    - Conv1d: weight gets N(0.0, 0.02); bias (if present) is zeroed.
    - Linear: weight gets Xavier-uniform initialization.

    Intended to be passed to `apply(model, weights_init)` so it visits every
    sub-cell; cells of any other type are left untouched.

    Bug fix vs. the original: assigning `p = initializer(...)` only rebinds a
    local name and `m.weight = initializer(...)` replaces the Parameter with a
    plain Tensor; `Parameter.set_data(...)` writes the values in place, which
    is the documented MindSpore initialization pattern.
    """
    classname = m.__class__.__name__
    if classname.find('Conv1DResBlock') != -1:
        for k, p in m.parameters_and_names():
            if 'weight' in k and 'conv' in k:
                # Write into the Parameter; rebinding `p` would be a no-op.
                p.set_data(initializer(Normal(0.02, 0.0), p.shape, p.dtype))
    elif classname.find('Conv1d') != -1:
        m.weight.set_data(initializer(Normal(0.02, 0.0), m.weight.shape, m.weight.dtype))
        if hasattr(m, 'bias') and m.bias is not None:
            print('bias to 0 for module: ', m)
            m.bias.set_data(ops.zeros_like(m.bias))
    elif classname.find('Linear') != -1:
        m.weight.set_data(initializer(XavierUniform(), m.weight.shape, m.weight.dtype))

def z_fropout(m):
    """Keep dropout active while freezing everything else.

    Any cell whose class name contains 'Dropout' is switched to training
    mode (so dropout noise keeps firing); every other cell is put into
    eval mode. Meant to be used with `apply`.
    """
    if 'Dropout' in m.__class__.__name__:
        m.set_train()
    else:
        # Mirrors what PyTorch does internally: just clear the train flag.
        m.set_train(False)

def apply(m, fn):
    """Recursively apply `fn` to `m` and every nested sub-cell.

    Traversal is post-order: children are visited before their parent,
    so `fn` sees leaf cells first and `m` itself last.
    """
    children = list(m.cells())
    for child in children:
        apply(child, fn)
    fn(m)

class SEGAN(Model):
    """Speech Enhancement GAN (generator + discriminator pair).

    G enhances noisy waveforms; D scores 2-channel (clean-or-enhanced,
    noisy) waveform pairs. Training combines an adversarial loss with an
    annealed L1 regression loss; `evaluate` computes objective metrics
    (PESQ/SSNR/CSIG/CBAK/COVL) through `composite_helper`.
    """

    def __init__(self, opts, name='SEGAN',
                 generator=None,
                 discriminator=None):
        """Build (or adopt) the G and D networks and initialize their weights.

        Args:
            opts: hyper-parameter namespace (fmaps, kwidths, poolings, z_dim,
                save_path, preemph, ...) consumed by Generator/Discriminator.
            name: model name forwarded to the Model base class.
            generator: optional pre-built generator; built from `opts` if None.
            discriminator: optional pre-built discriminator; likewise.
        """
        super(SEGAN, self).__init__(name)
        self.save_path = opts.save_path
        self.preemph = opts.preemph
        self.reg_loss = nn.L1Loss(reduction='mean') # hard-coded to L1 for now
        if generator is None:
            # Build G and D
            self.G = Generator(1,
                               opts.genc_fmaps,
                               opts.gkwidth,
                               opts.genc_poolings,
                               opts.gdec_fmaps,
                               opts.gdec_kwidth,
                               opts.gdec_poolings,
                               z_dim=opts.z_dim,
                               no_z=opts.no_z,
                               skip=(not opts.no_skip),
                               bias=opts.bias,
                               skip_init=opts.skip_init,
                               skip_type=opts.skip_type,
                               skip_merge=opts.skip_merge,
                               skip_kwidth=opts.skip_kwidth)
        else:
            self.G = generator
        apply(self.G, weights_init)
        # print('Generator: ', self.G)

        if discriminator is None:
            # D takes 2 input channels: (clean-or-enhanced, noisy) stacked.
            dkwidth = opts.gkwidth if opts.dkwidth is None else opts.dkwidth
            self.D = Discriminator(2, opts.denc_fmaps, dkwidth,
                                   poolings=opts.denc_poolings,
                                   pool_type=opts.dpool_type,
                                   pool_slen=opts.dpool_slen, 
                                   norm_type=opts.dnorm_type,
                                   phase_shift=opts.phase_shift,
                                   sinc_conv=opts.sinc_conv)
        else:
            self.D = discriminator
        apply(self.D, weights_init)
        # print('Discriminator: ', self.D)
    def gen_train_samples(self, clean_samples, noisy_samples, z_sample, iteration=None):
        """Run G on the cached sample batch and dump 16 kHz wav files.

        Writes sample_{iteration}-{m}.wav for each enhanced waveform in
        `self.save_path`, plus (once only) the gtruth_{m}/noisy_{m}/dif_{m}
        reference wavs. De-emphasis is undone with `self.preemph` first.
        """
        if z_sample is not None:
            canvas_w = self.G(noisy_samples, clean_samples, z=z_sample)
        else:
            canvas_w = self.G(noisy_samples, clean_samples)
        sample_dif = noisy_samples - clean_samples
        # sample wavs
        for m in range(noisy_samples.shape[0]):
            m_canvas = de_emphasize(canvas_w[m, 0].asnumpy(), self.preemph)
            print('w{} max: {} min: {}'.format(m, m_canvas.max(), m_canvas.min()))
            wavfile.write(os.path.join(self.save_path, 'sample_{}-{}.wav'.format(iteration, m)), int(16e3), m_canvas)
            m_clean = de_emphasize(clean_samples[m, 0].asnumpy(), self.preemph)
            m_noisy = de_emphasize(noisy_samples[m, 0].asnumpy(), self.preemph)
            m_dif = de_emphasize(sample_dif[m, 0].asnumpy(), self.preemph)
            m_gtruth_path = os.path.join(self.save_path, 'gtruth_{}.wav'.format(m))
            # Reference wavs are constant across iterations; write them once.
            if not os.path.exists(m_gtruth_path):
                wavfile.write(os.path.join(self.save_path, 'gtruth_{}.wav'.format(m)), int(16e3), m_clean)
                wavfile.write(os.path.join(self.save_path, 'noisy_{}.wav'.format(m)), int(16e3), m_noisy)
                wavfile.write(os.path.join(self.save_path, 'dif_{}.wav'.format(m)), int(16e3), m_dif)

    def build_optimizers(self, opts):
        """Return the (Gopt, Dopt) optimizer pair selected by `opts.opt`.

        Supports 'rmsprop' and 'adam' (the latter with beta1=0, beta2=0.9,
        a common GAN setting); both use 1e-4 weight decay.

        Raises:
            ValueError: for any other `opts.opt` value.
        """
        if opts.opt == 'rmsprop':
            Gopt = nn.RMSProp(self.G.get_parameters(), learning_rate=opts.g_lr, weight_decay=1e-4)
            Dopt = nn.RMSProp(self.D.get_parameters(), learning_rate=opts.d_lr, weight_decay=1e-4)
        elif opts.opt == 'adam':
            Gopt = nn.Adam(self.G.get_parameters(), learning_rate=opts.g_lr, beta1=0, beta2=0.9, weight_decay=1e-4)
            Dopt = nn.Adam(self.D.get_parameters(), learning_rate=opts.d_lr, beta1=0, beta2=0.9, weight_decay=1e-4)
        else:
            raise ValueError('Unrecognized optimizer {}'.format(opts.opt))
        return Gopt, Dopt

    def train(self, opts, dloader, criterion, l1_init, l1_dec_step,
              l1_dec_epoch, log_freq, va_dloader=None,
              device='cpu'):
        """Train the SEGAN adversarially.

        Per batch, three updates run: D on real (clean, noisy) pairs towards
        label 1, D on fake (G(noisy), noisy) pairs towards label 0, then G
        with an adversarial loss (target 1) plus an L1 loss to `clean`. The
        L1 weight starts at `l1_init` and, once `l1_dec_epoch` is reached,
        decays by `l1_dec_step` per batch down to 0. Scalars and histograms
        go to tensorboard every `log_freq` batches. With `va_dloader`,
        validation runs each epoch; the best COVL+PESQ+SSNR checkpoint is
        kept and training early-stops after `opts.patience` bad epochs.
        `device` is unused: MindSpore configures devices globally.
        """
        beg_t = timeit.default_timer()
        # create writer
        self.writer = SummaryWriter(os.path.join(self.save_path, 'train'))

        # Build the optimizers
        Gopt, Dopt = self.build_optimizers(opts)

        # attach opts to models so that they are saved altogether in ckpts
        self.G.optim = Gopt
        self.D.optim = Dopt

        def D_real_update(noisy, clean):
            # D step on real pairs: push D(clean||noisy) towards label 1.
            # G runs here too so its output (Genh) can be reused below.
            Genh = self.G(noisy)
            d_real = self.D(ops.concat([clean, noisy], axis = 1))
            d_real_loss = criterion(d_real.view(-1), ms.numpy.ones(noisy.shape[0], dtype=ms.float32)) # flatten scores
            return d_real_loss, Genh, d_real

        # has_aux=True: only the first output is differentiated w.r.t. D's
        # parameters; Genh and d_real ride along as auxiliary values.
        grad_fn_d_real = ops.value_and_grad(D_real_update, None, self.D.optim.parameters, has_aux=True)

        def D_fake_update(Genh, noisy):
            # D step on fakes: stop_gradient keeps G out of this backward.
            d_fake = self.D(ops.concat([ops.stop_gradient(Genh), noisy], axis = 1))
            d_fake_loss = criterion(d_fake.view(-1), ms.numpy.zeros(noisy.shape[0], dtype=ms.float32))
            return d_fake_loss, d_fake

        # NOTE(review): D_fake_update returns (loss, aux) but has_aux is not
        # set here, unlike grad_fn_d_real above — confirm the aux output is
        # excluded from differentiation.
        grad_fn_d_fake = ops.value_and_grad(D_fake_update, None, self.D.optim.parameters)

        def G_real_update(Genh, noisy, clean, l1_weight):
            # G step: fool D (target 1) and stay close to clean via L1.
            d_fake_ = self.D(ops.concat([Genh, noisy], axis = 1))
            g_adv_loss = criterion(d_fake_.view(-1), ms.numpy.ones(noisy.shape[0], dtype=ms.float32))
            g_l1_loss = l1_weight * self.reg_loss(Genh, clean)

            return g_adv_loss, g_l1_loss, d_fake_

        # NOTE(review): three outputs and no has_aux here either — verify
        # which outputs value_and_grad differentiates in this form.
        grad_fn_g_real = ops.value_and_grad(G_real_update, None, self.G.optim.parameters)

        def train_step(noisy, clean, l1_weight):
            # ops.depend returns the loss while forcing the optimizer
            # update to stay in the execution graph (ordering guarantee).
            (d_real_loss, Genh, d_real), d_real_grads = grad_fn_d_real(noisy, clean)
            d_real_loss = ops.depend(d_real_loss, self.D.optim(d_real_grads))

            (d_fake_loss, d_fake), d_fake_grads = grad_fn_d_fake(Genh, noisy)
            d_fake_loss = ops.depend(d_fake_loss, self.D.optim(d_fake_grads))

            (g_adv_loss, g_l1_loss, d_fake_), g_grads = grad_fn_g_real(Genh, noisy, clean, l1_weight)
            g_loss = g_adv_loss + g_l1_loss
            g_loss = ops.depend(g_loss, self.G.optim(g_grads))

            return d_real_loss, d_fake_loss, d_real, d_fake, d_fake_, g_adv_loss, g_l1_loss, Genh

        # Build savers for end of epoch, storing up to 3 epochs each
        eoe_g_saver = Saver(self.G, opts.save_path, max_ckpts=3,
                            optimizer=self.G.optim, prefix='EOE_G-')
        eoe_d_saver = Saver(self.D, opts.save_path, max_ckpts=3,
                            optimizer=self.D.optim, prefix='EOE_D-')
        num_batches = dloader.source_len // dloader.batch_size
        l1_weight = l1_init
        iteration = 1
        timings = []
        evals = {}
        noisy_evals = {}
        noisy_samples = None
        clean_samples = None
        z_sample = None
        patience = opts.patience
        best_val_obj = 0
        # acumulator for exponential avg of valid curve
        acum_val_obj = 0
        # make label tensor
        # NOTE(review): `label` is built here but never used below.
        label = ms.numpy.ones(opts.batch_size)
        # label = label.to(device)  # MindSpore has no .to(device); devices are configured globally
        G_losses = []
        D_losses = []

        for epoch in range(1, opts.epoch + 1):
            beg_t = timeit.default_timer()
            self.D.set_train()
            self.G.set_train()
            # dataIter = dloader.create_tuple_iterator()
            epoch_steps = dloader.source_len//opts.batch_size
            dloader_tq = tqdm(enumerate(dloader, start=1), total=epoch_steps, \
                desc=f'Epoch {epoch}/{opts.epoch}', unit='batch', ncols=120, ascii=True, mininterval=1)
            for bidx, batch in dloader_tq:
                # batch = collate_fn(batch)  # apply collate_fn manually if needed
                # Anneal the L1 weight after l1_dec_epoch, clamped at 0.
                if epoch >= l1_dec_epoch:
                    if l1_weight > 0:
                        l1_weight -= l1_dec_step
                        # ensure it is 0 if it goes < 0
                        l1_weight = max(0, l1_weight)
                sample = batch
                if len(sample) == 4:
                    uttname, clean, noisy, slice_idx = batch
                else:
                    raise ValueError('Returned {} elements per '
                                        'sample?'.format(len(sample)))
                # assert clean.shape[0] == opts.batch_size, f"data batch size {clean.shape[0]} should be equal to setting size {opt.batch_size}"
                # Add the channel dimension: (B, T) -> (B, 1, T).
                clean = ops.expand_dims(clean, axis=1)
                noisy = ops.expand_dims(noisy, axis=1)
                # label = ms.numpy.ones(clean.shape[0])
                # Cache the first 20 pairs once, reused for sample dumps.
                if noisy_samples is None:
                    noisy_samples = noisy[:20, :, :] # .contiguous()
                    clean_samples = clean[:20, :, :] # .contiguous()

                d_real_loss, d_fake_loss, d_real, d_fake, \
                d_fake_, g_adv_loss, g_l1_loss, Genh = train_step(noisy, clean, l1_weight)

                end_t = timeit.default_timer()
                timings.append(end_t - beg_t)
                beg_t = timeit.default_timer()

                if z_sample is None and not self.G.no_z:
                    # capture sample now that we know shape after first
                    # inference
                    z_sample = self.G.z[:20, :, :]
                    print('z_sample size: ', z_sample.shape)

                # Log on every log_freq-th batch and on the last batch(es).
                if bidx % log_freq == 0 or bidx >= epoch_steps:
                    d_real_loss_v = d_real_loss.asnumpy()
                    d_fake_loss_v = d_fake_loss.asnumpy()
                    g_adv_loss_v = g_adv_loss.asnumpy()
                    g_l1_loss_v = g_l1_loss.asnumpy()
                    log = '(Iter {}) Batch {}/{} (Epoch {}) d_real:{:.2e}, d_fake:{:.2e}, '.\
                            format(iteration, bidx, epoch_steps, epoch, d_real_loss_v, d_fake_loss_v)
                    log += 'g_adv:{:.2e}, g_l1:{:.2e} l1_w: {:.2e}, btime: {:.2f}s, mbtime: {:.2f}s'.\
                            format(g_adv_loss_v, g_l1_loss_v, l1_weight, timings[-1], np.mean(timings))
                    print(log)
                    self.writer.add_scalar('D_real', d_real_loss_v, iteration)
                    self.writer.add_scalar('D_fake', d_fake_loss_v, iteration)
                    self.writer.add_scalar('G_adv', g_adv_loss_v, iteration)
                    self.writer.add_scalar('G_l1', g_l1_loss_v, iteration)
                    self.writer.add_histogram('D_fake__hist', d_fake_.asnumpy(), iteration, bins='sturges')
                    self.writer.add_histogram('D_fake_hist', d_fake.asnumpy(), iteration, bins='sturges')
                    self.writer.add_histogram('D_real_hist', d_real.asnumpy(), iteration, bins='sturges')
                    self.writer.add_histogram('Gz', Genh.asnumpy(), iteration, bins='sturges')
                    self.writer.add_histogram('clean', clean.asnumpy(), iteration, bins='sturges')
                    self.writer.add_histogram('noisy', noisy.asnumpy(), iteration, bins='sturges')
                    # get D and G weights and plot their norms by layer and
                    # global
                    # def model_weights_norm(model, total_name):
                    #     total_GW_norm = 0
                    #     for k, v in model.named_parameters():
                    #         if 'weight' in k:
                    #             W = v.data
                    #             norm_op = ops.LpNorm()
                    #             W_norm = norm_op(W)
                    #             self.writer.add_scalar('{}_Wnorm'.format(k), W_norm, iteration)
                    #             total_GW_norm += W_norm
                    #     self.writer.add_scalar('{}_Wnorm'.format(total_name), total_GW_norm, iteration)
                    # model_weights_norm(self.G, 'Gtotal')
                    # model_weights_norm(self.D, 'Dtotal')
                    if not opts.no_train_gen:
                        self.gen_train_samples(clean_samples, noisy_samples, z_sample, iteration=iteration)

            # End of epoch: run validation.
            if va_dloader is not None:
                if len(noisy_evals) == 0:
                    # First validation also scores the raw noisy baseline.
                    evals_, noisy_evals_ = self.evaluate(opts, va_dloader, log_freq, do_noisy=True)
                    for k, v in noisy_evals_.items():
                        if k not in noisy_evals:
                            noisy_evals[k] = []
                        noisy_evals[k] += v
                        self.writer.add_scalar('noisy-{}'.format(k), noisy_evals[k][-1], epoch)
                else:
                    evals_ = self.evaluate(opts, va_dloader, log_freq, do_noisy=False)
                for k, v in evals_.items():
                    if k not in evals:
                        evals[k] = []
                    evals[k] += v
                    self.writer.add_scalar('Genh-{}'.format(k), evals[k][-1], epoch)
                # Composite validation objective: higher is better.
                val_obj = evals['covl'][-1] + evals['pesq'][-1] + evals['ssnr'][-1]
                self.writer.add_scalar('Genh-val_obj', val_obj, epoch)
                if val_obj > best_val_obj:
                    print('Val obj (COVL + SSNR + PESQ) improved {} -> {}, save ckpt on iteration {}'.\
                        format(best_val_obj, val_obj, iteration))
                    best_val_obj = val_obj
                    patience = opts.patience
                    # save models with true valid curve is minimum
                    # (the extra `iteration` format argument below is ignored)
                    ms.save_checkpoint(self.G, "{}/generator_best.ckpt".format(opts.save_path, iteration))
                    ms.save_checkpoint(self.D, "{}/discriminator_best.ckpt".format(opts.save_path, iteration))
                    # self.G.save(self.save_path, iteration, True)
                    # self.D.save(self.save_path, iteration, True)
                else:
                    patience -= 1
                    print('Val loss did not improve. Patience {}/{}'.format(patience,opts.patience))
                    if patience <= 0:
                        print('STOPPING SEGAN TRAIN: OUT OF PATIENCE.')
                        break
            # NOTE(review): `iteration` advances once per EPOCH, although the
            # logging above labels it as a per-batch counter — confirm intent.
            iteration += 1

            # save models in end of epoch with EOE savers
            ms.save_checkpoint(self.G, "{}/generator.ckpt".format(opts.save_path, iteration))
            ms.save_checkpoint(self.D, "{}/discriminator.ckpt".format(opts.save_path, iteration))

    def evaluate(self, opts, dloader, log_freq, do_noisy=False,
                 max_samples=1, device='cpu'):
        """Objective evaluation with PESQ, SSNR, COVL, CBAK and CSIG.

        Runs G over up to `max_samples` batches of `dloader` and scores the
        de-emphasized enhanced audio with a multiprocessing pool running
        `composite_helper`. The pool is created lazily and cached on `self`.

        Args:
            opts: needs `batch_size` and `eval_workers`.
            dloader: loader yielding (uttname, clean, noisy, slice_idx).
            log_freq: unused here (kept for interface symmetry with train).
            do_noisy: also score the raw noisy audio as a baseline.
            max_samples: maximum number of batches to evaluate.
            device: unused; MindSpore configures devices globally.

        Returns:
            `evals` dict of metric lists, plus `noisy_evals` when `do_noisy`.
            NOTE(review): only the pool results of the LAST evaluated batch
            (`map_ret`) are folded into the returned dicts after the loop —
            confirm whether per-batch accumulation was intended.
        """
        self.G.set_train(False)
        self.D.set_train(False)
        evals = {'pesq':[], 'ssnr':[], 'csig':[], 'cbak':[], 'covl':[]}
        pesqs = []
        ssnrs = []
        if do_noisy:
            noisy_evals = {'pesq':[], 'ssnr':[], 'csig':[], 'cbak':[], 'covl':[]}
            npesqs = []
            nssnrs = []
        # Lazily create one worker pool and cache it on the instance.
        if not hasattr(self, 'pool'):
            self.pool = mp.Pool(opts.eval_workers)
        total_s = 0
        timings = []
        # going over dataset ONCE
        for bidx, batch in enumerate(dloader, start=1):
            sample = batch
            if len(sample) == 4:
                uttname, clean, noisy, slice_idx = batch
            else:
                raise ValueError('Returned {} elements per sample?'.format(len(sample)))
            clean = clean
            # NOTE(review): `noisy` gets a channel dim via Tensor.unsqueeze here
            # while train() uses ops.expand_dims, and `clean` is left as-is
            # (the line above is a no-op) — confirm shapes match what G and
            # de_emphasize expect.
            noisy = noisy.unsqueeze(1)
            Genh = self.G(noisy).squeeze(1)
            clean_npy = clean.asnumpy()
            Genh_npy = Genh.asnumpy()
            # Undo the pre-emphasis filter before scoring.
            clean_npy = np.apply_along_axis(de_emphasize, 0, clean_npy, self.preemph)
            Genh_npy = np.apply_along_axis(de_emphasize, 0, Genh_npy, self.preemph)
            beg_t = timeit.default_timer()
            if do_noisy:
                noisy_npy = noisy.asnumpy()
                noisy_npy = np.apply_along_axis(de_emphasize, 0, noisy_npy, self.preemph)
                args = [(clean_npy[i], Genh_npy[i], noisy_npy[i]) for i in range(opts.batch_size)]
            else:
                args = [(clean_npy[i], Genh_npy[i], None) for i in range(opts.batch_size)]
            map_ret = self.pool.map(composite_helper, args)
            end_t = timeit.default_timer()
            print('Time to process eval with {} samples: {} s'.format(opts.batch_size, end_t - beg_t))
            if bidx >= max_samples:
                break

        def fill_ret_dict(ret_dict, in_dict):
            # Append each metric value from in_dict onto ret_dict's lists.
            for k, v in in_dict.items():
                ret_dict[k].append(v)

        if do_noisy:
            for eval_, noisy_eval_ in map_ret:
                fill_ret_dict(evals, eval_)
                fill_ret_dict(noisy_evals, noisy_eval_)
            return evals, noisy_evals
        else:
            for eval_ in map_ret:
                fill_ret_dict(evals, eval_)
            return evals