import mindspore as ms
import mindspore.nn as nn
import random
import numpy as np
from pytest import skip
# try:
#     from core import *
#     from modules import *
# except ImportError:
from .core import *
from .modules import *
from .spectral_norm import SpectralNorm

class GSkip(nn.Cell):
    """Generator skip connection between encoder and decoder activations.

    Depending on ``skip_type``, the encoder activation ``hj`` is either
    scaled channel-wise by a learnable ('alpha') or frozen ('constant')
    per-channel gain, or transformed by a Conv1d ('conv'), before being
    merged with the decoder activation ``hi`` by sum or concatenation.
    """

    def __init__(self, skip_type, size, skip_init, skip_dropout=0, 
                 merge_mode='sum', kwidth=11, bias=True):
        """
        Args:
            skip_type: 'alpha' (learnable gain), 'constant' (frozen gain)
                or 'conv' (1-D convolution on the skipped activation).
            size: number of channels of the skipped activation.
            skip_init: gain init scheme ('zero' | 'randn' | 'one');
                only applies to 'alpha'/'constant' skips.
            skip_dropout: dropout probability applied to the skip output.
            merge_mode: 'sum' or 'concat'.
            kwidth: kernel width of the 'conv' skip.
            bias: whether the 'conv' skip uses a bias term.

        Raises:
            TypeError: on an unrecognized ``skip_init`` or ``skip_type``.
        """
        super().__init__()
        self.merge_mode = merge_mode
        if skip_type == 'alpha' or skip_type == 'constant':
            if skip_init == 'zero':
                alpha_ = ms.numpy.zeros(size)
            elif skip_init == 'randn':
                alpha_ = ms.numpy.randn(size)
            elif skip_init == 'one':
                alpha_ = ms.numpy.ones(size)
            else:
                raise TypeError('Unrecognized alpha init scheme: ', 
                                skip_init)
            # Both gain variants are stored as (1, C, 1) so the tile in
            # construct() broadcasts them over (batch, C, time).
            # BUGFIX: the 'alpha' branch used view(1, 1, -1), which tiles
            # to (batch, 1, C*time) and cannot broadcast against hj.
            self.skip_k = ms.Parameter(alpha_.view(1, -1, 1))
            if skip_type == 'constant':
                # constant, not learnable
                self.skip_k.requires_grad = False
        elif skip_type == 'conv':
            # 'same'-style padding for odd kernel widths
            pad = kwidth // 2 if kwidth > 1 else 0
            self.skip_k = nn.Conv1d(size, size, kwidth, stride=1,
                                    pad_mode='pad', padding=pad,
                                    has_bias=bias)
        else:
            raise TypeError('Unrecognized GSkip scheme: ', skip_type)
        self.skip_type = skip_type
        if skip_dropout > 0:
            # NOTE: this MindSpore Dropout API takes keep_prob,
            # hence the 1 - skip_dropout.
            self.skip_dropout = nn.Dropout(1 - skip_dropout)

    def __repr__(self):
        if self.skip_type == 'alpha':
            return self.cls_name + '(Alpha(1))'
        elif self.skip_type == 'constant':
            return self.cls_name + '(Constant(1))'
        else:
            return super().__repr__()

    def construct(self, hj, hi):
        """Merge encoder activation ``hj`` into decoder activation ``hi``.

        Returns the merged tensor (sum or channel-concat, per merge_mode).
        """
        if self.skip_type == 'conv':
            sk_h = self.skip_k(hj)
        else:
            # Broadcast the (1, C, 1) gain over batch and time axes.
            # BUGFIX: original read hj.shape[2](2), calling an int.
            skip_k = ms.numpy.tile(self.skip_k, (hj.shape[0], 1, hj.shape[2]))
            sk_h = skip_k * hj
        if hasattr(self, 'skip_dropout'):
            sk_h = self.skip_dropout(sk_h)
        if self.merge_mode == 'sum':
            # merge with input hi on current layer
            return sk_h + hi
        elif self.merge_mode == 'concat':
            return ms.ops.concat((hi, sk_h), axis=1)
        else:
            raise TypeError('Unrecognized skip merge mode: ', self.merge_mode)

class Generator(Model):
    """SEGAN-style fully convolutional encoder/decoder (U-Net) generator.

    A stack of strided GConv1DBlock layers encodes the input waveform; an
    optional latent tensor ``z`` is concatenated at the bottleneck; a stack
    of GDeconv1DBlock/GConv1DBlock layers decodes back to the waveform,
    merging encoder activations through GSkip connections kept in
    ``self.skips``.
    """

    def __init__(self, ninputs, fmaps,
                 kwidth, poolings, 
                 dec_fmaps=None,
                 dec_kwidth=None,
                 dec_poolings=None,
                 z_dim=None,
                 no_z=False,
                 skip=True,
                 bias=False,
                 skip_init='one',
                 skip_dropout=0,
                 skip_type='alpha',
                 norm_type=None,
                 skip_merge='sum',
                 skip_kwidth=11,
                 name='Generator'):
        """Build the encoder, skip connections and decoder.

        Args:
            ninputs: number of input channels of the waveform.
            fmaps: list of encoder feature-map sizes, one per layer.
            kwidth: encoder kernel width(s); an int is broadcast to all layers.
            poolings: list of encoder strides (decimation factors).
            dec_fmaps / dec_kwidth / dec_poolings: decoder counterparts;
                derived by mirroring the encoder when None.
            z_dim: latent size; defaults to fmaps[-1] when z is used.
            no_z: if True, no latent tensor is concatenated at the bottleneck.
            skip: whether to build skip connections between mirror layers.
            bias: bias flag forwarded to conv blocks and skips.
            skip_init / skip_dropout / skip_type / skip_merge / skip_kwidth:
                forwarded to GSkip (see GSkip for their meaning).
            norm_type: normalization scheme forwarded to the conv blocks.
            name: model name passed to the Model base class.
        """
        super().__init__(name=name)
        self.skip = skip
        self.bias = bias
        self.no_z = no_z
        self.z_dim = z_dim
        self.z = ms.Parameter(ms.Tensor(1.0, ms.float32), name="z") # MindSpore cells do not support attribute assignment inside construct, so z is pre-declared
        self.enc_blocks = nn.CellList()
        assert isinstance(fmaps, list), type(fmaps)
        assert isinstance(poolings, list), type(poolings)
        if isinstance(kwidth, int): 
            kwidth = [kwidth] * len(fmaps)
        assert isinstance(kwidth, list), type(kwidth)
        skips = {}
        ninp = ninputs
        for pi, (fmap, pool, kw) in enumerate(zip(fmaps, poolings, kwidth),
                                              start=1):
            if skip and pi < len(fmaps):
                # Make a skip connection for all but last hidden layer
                gskip = GSkip(skip_type, fmap,
                              skip_init,
                              skip_dropout,
                              merge_mode=skip_merge,
                              kwidth=skip_kwidth,
                              bias=bias)
                l_i = pi - 1
                skips[str(l_i)] = {'alpha':gskip}
                # register as attribute so the cell tracks the skip's parameters
                setattr(self, 'alpha_{}'.format(l_i), skips[str(l_i)]['alpha'])
            enc_block = GConv1DBlock(
                ninp, fmap, kw, stride=pool, bias=bias,
                norm_type=norm_type
            )
            self.enc_blocks.append(enc_block)
            ninp = fmap
        
        self.skips = skips
        if not no_z and z_dim is None:
            z_dim = fmaps[-1]
        if not no_z:
            # bottleneck channel count grows by z_dim after concatenating z
            ninp += z_dim
        # Ensure we have fmaps, poolings and kwidth ready to decode
        if dec_fmaps is None:
            # mirror of the encoder, ending in a single output channel
            dec_fmaps = fmaps[::-1][1:] + [1]
        else:
            assert isinstance(dec_fmaps, list), type(dec_fmaps)
        if dec_poolings is None:
            dec_poolings = poolings[:]
        else:
            assert isinstance(dec_poolings, list), type(dec_poolings)
        self.dec_poolings = dec_poolings
        if dec_kwidth is None:
            dec_kwidth = kwidth[:]
        else:
            if isinstance(dec_kwidth, int): 
                dec_kwidth = [dec_kwidth] * len(dec_fmaps)
        assert isinstance(dec_kwidth, list), type(dec_kwidth)
        # Build the decoder
        self.dec_blocks = nn.CellList()
        for pi, (fmap, pool, kw) in enumerate(zip(dec_fmaps, dec_poolings, 
                                                  dec_kwidth),
                                              start=1):
            if skip and pi > 1 and pool > 1:
                if skip_merge == 'concat':
                    # concatenated skips double the decoder input channels
                    ninp *= 2

            if pi >= len(dec_fmaps):
                # final layer squashes the output waveform into [-1, 1]
                act = 'Tanh'
            else:
                act = None
            if pool > 1:
                # upsampling stage mirroring an encoder decimation
                dec_block = GDeconv1DBlock(
                    ninp, fmap, kw, stride=pool,
                    norm_type=norm_type, bias=bias,
                    act=act
                )
            else:
                dec_block = GConv1DBlock(
                    ninp, fmap, kw, stride=1, 
                    bias=bias,
                    norm_type=norm_type
                )
            self.dec_blocks.append(dec_block)
            ninp = fmap
            
        
    # Reference (general, loop-based) implementation, kept for documentation;
    # the unrolled construct() below is used instead, presumably because of
    # MindSpore graph-mode restrictions — TODO confirm.
    # def construct(self, x, z=None, ret_hid=False):
    #     hall = {}
    #     hi = x
    #     skips = self.skips

    #     for l_i, enc_layer in enumerate(self.enc_blocks):
    #         hi, linear_hi = enc_layer(hi, True)
    #         if self.skip and l_i < (len(self.enc_blocks) - 1):
    #             skips[str(l_i)]['tensor'] = linear_hi
    #         if ret_hid:
    #             hall['enc_{}'.format(l_i)] = hi
    #     if not self.no_z:
    #         if z is None:
    #             # make z; unpack hi's trailing dims as individual randn args
    #             z = ms.numpy.randn(hi.shape[0], self.z_dim, *hi.shape[2:])
    #         if len(z.shape) != len(hi.shape):
    #             raise ValueError('len(z.size) {} != len(hi.size) {}'
    #                              ''.format(len(z.shape), len(hi.shape)))
    #         if not hasattr(self, 'z'):
    #             self.z = z
    #         hi = ms.ops.concat((z, hi), axis=1)
    #         if ret_hid:
    #             hall['enc_zc'] = hi
    #     else:
    #         z = None
    #     enc_layer_idx = len(self.enc_blocks) - 1
    #     for l_i, dec_layer in enumerate(self.dec_blocks):
    #         if self.skip and str(enc_layer_idx) in self.skips and self.dec_poolings[l_i] > 1:
    #             skip_conn = skips[str(enc_layer_idx)]
    #             hi = skip_conn['alpha'](skip_conn['tensor'], hi)
    #         hi = dec_layer(hi)
    #         enc_layer_idx -= 1
    #         if ret_hid:
    #             hall['dec_{}'.format(l_i)] = hi
    #     if ret_hid:
    #         return hi, hall
    #     else:
    #         return hi

    def construct(self, x, z=None, ret_hid=False):
        """Run the generator on waveform ``x`` and return the decoded tensor.

        NOTE(review): this unrolled version hard-codes exactly 5 encoder and
        5 decoder blocks and silently ignores ``ret_hid`` (kept only for
        interface compatibility with the commented reference version above).

        Args:
            x: input tensor; assumed (batch, ninputs, time) — TODO confirm.
            z: optional latent tensor; sampled from randn when None.
            ret_hid: unused in this implementation.

        Raises:
            ValueError: when a provided ``z`` has a different rank than the
                bottleneck activation.
        """
        hi = x
        skips = self.skips

        # len of enc_blocks is 5
        hi, linear_hi = self.enc_blocks[0](hi, True)
        # [:] slice-copies the pre-activation output for the skip path
        skips['0']['tensor'] = linear_hi[:]
        hi, linear_hi = self.enc_blocks[1](hi, True)
        skips['1']['tensor'] = linear_hi[:]
        hi, linear_hi = self.enc_blocks[2](hi, True)
        skips['2']['tensor'] = linear_hi[:]
        hi, linear_hi = self.enc_blocks[3](hi, True)
        skips['3']['tensor'] = linear_hi[:]
        # deepest layer: no skip connection is stored
        hi, linear_hi = self.enc_blocks[4](hi, True)
        if not self.no_z:
            if z is None:
                # sample a latent tensor matching the bottleneck's trailing dims
                z = ms.numpy.randn(hi.shape[0], self.z_dim, *hi.shape[2:])
            if len(z.shape) != len(hi.shape):
                raise ValueError('len(z.size) {} != len(hi.size) {}'.format(len(z.shape), len(hi.shape)))
            hi = ms.ops.concat((z, hi), axis=1)
        else:
            z = None
        enc_layer_idx = len(self.enc_blocks) - 1
        # decode, merging mirror-layer skips: dec 0..2 pair with enc 3..1
        hi = self.dec_blocks[0](hi)
        skip_conn = skips['3']
        hi = skip_conn['alpha'](skip_conn['tensor'], hi)
        hi = self.dec_blocks[1](hi)
        skip_conn = skips['2']
        hi = skip_conn['alpha'](skip_conn['tensor'], hi)
        hi = self.dec_blocks[2](hi)
        skip_conn = skips['1']
        hi = skip_conn['alpha'](skip_conn['tensor'], hi)
        # NOTE(review): skips['0'] is stored but never consumed here — confirm
        # whether dec_blocks[3] was meant to merge it.
        hi = self.dec_blocks[3](hi)
        hi = self.dec_blocks[4](hi)

        return hi

        



# TODO: Generator1D port is unfinished — not yet usable.
class Generator1D(Model):

    def __init__(self, ninputs, enc_fmaps, kwidth,
                 activations, lnorm=False, dropout=0.,
                 pooling=2, z_dim=256, z_all=False,
                 skip=True, skip_blacklist=[],
                 dec_activations=None, cuda=False,
                 bias=False, aal=False, wd=0.,
                 skip_init='one', skip_dropout=0.,
                 no_tanh=False, aal_out=False,
                 rnn_core=False, linterp=False,
                 mlpconv=False, dec_kwidth=None,
                 no_z=False,
                 skip_type='alpha', 
                 num_spks=None, multilayer_out=False,
                 skip_merge='sum', snorm=False,
                 convblock=False, post_skip=False,
                 pos_code=False, satt=False,
                 dec_fmaps=None, up_poolings=None,
                 post_proc=False, out_gate=False, 
                 linterp_mode='linear', hidden_comb=False, 
                 big_out_filter=False, z_std=1,
                 freeze_enc=False, skip_kwidth=11,
                 pad_type='constant'):
        # if num_spks is specified, do onehot coditioners in dec stages
        # subract_mean: from output signal, get rif of mean by windows
        # multilayer_out: add some convs in between gblocks in decoder
        super().__init__(name='Generator1D')
        self.dec_kwidth = dec_kwidth
        self.skip_kwidth = skip_kwidth
        self.skip = skip
        self.skip_init = skip_init
        self.skip_dropout = skip_dropout
        self.snorm = snorm
        self.z_dim = z_dim
        self.z_all = z_all
        self.pos_code = pos_code
        self.post_skip = post_skip
        self.big_out_filter = big_out_filter
        self.satt = satt
        self.post_proc = post_proc
        self.pad_type = pad_type
        self.onehot = num_spks is not None
        if self.onehot:
            assert num_spks > 0
        self.num_spks = num_spks
        # do not place any z
        self.no_z = no_z
        self.do_cuda = cuda
        self.wd = wd
        self.no_tanh = no_tanh
        self.skip_blacklist = skip_blacklist
        self.z_std = z_std
        self.freeze_enc = freeze_enc
        self.gen_enc = nn.CellList()
        if aal or aal_out:
            # Make cheby1 filter to include into mindspore conv blocks
            from scipy.signal import cheby1, dlti, dimpulse
            system = dlti(*cheby1(8, 0.05, 0.8 / pooling))
            tout, yout = dimpulse(system)
            filter_h = yout[0]
        if aal:
            self.filter_h = filter_h
        else:
            self.filter_h = None

        if dec_kwidth is None:
            dec_kwidth = kwidth

        if isinstance(activations, str):
            if activations != 'glu':
                activations = getattr(nn, activations)()
        if not isinstance(activations, list):
            activations = [activations] * len(enc_fmaps)
        if not isinstance(pooling, list) or len(pooling) == 1: 
            pooling = [pooling] * len(enc_fmaps)
        skips = {}
        # Build Encoder
        for layer_idx, (fmaps, pool, act) in enumerate(zip(enc_fmaps, 
                                                           pooling,
                                                           activations)):
            if layer_idx == 0:
                inp = ninputs
            else:
                inp = enc_fmaps[layer_idx - 1]
            if self.skip and layer_idx < (len(enc_fmaps) - 1):
                if layer_idx not in self.skip_blacklist:
                    l_i = layer_idx
                    gskip = GSkip(skip_type, fmaps,
                                  skip_init,
                                  skip_dropout,
                                  merge_mode=skip_merge,
                                  cuda=self.do_cuda,
                                  kwidth=self.skip_kwidth)
                    skips[l_i] = {'alpha':gskip}
                    setattr(self, 'alpha_{}'.format(l_i), skips[l_i]['alpha'])

        