
from .siren import SirenNet
from .splitinr import SplitINR
from torch import nn
from .inr import INR
from .mfn import FourierNet,GaborNet
from .dmf import DMF
from .sparse import SPARSE
from .tf import TensorFactorization
from .dip.dip import DIP
from .dip.unet import UNet
from .unet1d import UNet1d
import torch as t
import numpy as np

def get_nn(net='inr_siren', parameters=None, init_mode=None):
    """Factory: build a network module from a '<family>_<activation>' spec.

    Parameters
    ----------
    net : str or nn.Module
        Either an already-constructed module (returned unchanged) or a spec
        string such as 'inr_siren', 'splitinr_relu', 'tf', 'composition'.
    parameters : tuple, list or dict
        Family-specific constructor arguments; see each branch below.
    init_mode : optional
        Forwarded to INR-based networks.

    Returns
    -------
    nn.Module

    Raises
    ------
    ValueError
        For an unknown family name or a malformed parameter list.
    """
    if not isinstance(net, str):
        # Already a module: pass through unchanged.
        return net
    net_name = net.split(sep='_')[0]
    act_name = net.split(sep='_')[1] if '_' in net else None
    # Local result is named `model` (was `nn`, which shadowed torch.nn).
    if net_name == 'inr':
        dim_in, dim_hidden, dim_out, num_layers, w0_initial = parameters
        if act_name == 'siren':
            model = SirenNet(dim_in, dim_hidden, dim_out, num_layers, w0_initial=w0_initial)
        elif act_name == 'fourier':
            model = FourierNet(dim_in, dim_hidden, dim_out, n_layers=num_layers, input_scale=w0_initial)
        elif act_name == 'gabor':
            model = GaborNet(dim_in, dim_hidden, dim_out, n_layers=num_layers, input_scale=w0_initial)
        else:
            model = INR(dim_in, dim_hidden, dim_out, num_layers, activation=act_name, init_mode=init_mode)
    elif net_name == 'splitinr':
        if len(parameters) == 6:
            dim_in, dim_hidden, dim_out, num_layers, mode, w0_initial = parameters
            model = SplitINR(dim_in, dim_hidden, dim_out, num_layers, mode,
                             act_name=act_name, w0_initial=w0_initial)
        elif len(parameters) == 7:
            dim_in, dim_hidden, dim_out, num_layers, mode, w0_initial, feature_in_list = parameters
            model = SplitINR(dim_in, dim_hidden, dim_out, num_layers, mode,
                             act_name=act_name, w0_initial=w0_initial, feature_in_list=feature_in_list)
        else:
            # BUG FIX: was ``raise('...')`` - raising a str is itself a
            # TypeError in Python 3; raise a proper exception instead.
            raise ValueError('Wrong parameters num of split inr')
    elif net_name == 'increase':
        dim_in, dim_hidden, dim_out, num_layers = parameters
        if act_name == 'siren':
            raise ValueError('siren do not support monotonic mode, recommond use monoto_tanh instead')
        model = INR(dim_in, dim_hidden, dim_out, num_layers, activation=act_name,
                    init_mode=init_mode, monoto_mode=1)
    elif net_name == 'decrease':
        dim_in, dim_hidden, dim_out, num_layers = parameters
        if act_name == 'siren':
            raise ValueError('siren do not support monotonic mode, recommond use monoto_tanh instead')
        model = INR(dim_in, dim_hidden, dim_out, num_layers, activation=act_name,
                    init_mode=init_mode, monoto_mode=-1)
    elif net_name == 'sincrease':
        # NOTE(review): parameter layout differs from 'sdecrease' below
        # (third slot discarded here vs. act_name taken from `parameters`
        # there) - confirm which layout callers actually use.
        dim_in, dim_hidden, _, num_layers = parameters
        model = SplitMonoto(dim_in, dim_hidden, num_layers, act_name, monoto_mode=1)
    elif net_name == 'sdecrease':
        dim_in, dim_hidden, num_layers, act_name = parameters
        model = SplitMonoto(dim_in, dim_hidden, num_layers, act_name, monoto_mode=-1)
    elif net_name == 'dmf':
        model = DMF(parameters)
    elif net_name == 'dip':
        model = DIP(parameters)
    elif net_name == 'sparse':
        data_shape, N, std_w, mode = parameters
        model = SPARSE(data_shape, N, std_w, mode)
    elif net_name == 'tf':
        dim_ori, dim_cor, mode = parameters
        # BUG FIX: `mode` was unpacked from `parameters` but the call
        # hard-coded mode='tucker', silently ignoring the caller's choice.
        model = TensorFactorization(dim_ori, dim_cor, mode=mode)
    elif net_name == 'composition':
        # parameters is an ordered dict of spec-name -> parameter tuple.
        model = Composition(parameters)
    elif net_name == 'DoubleSplitINR':
        model = DoubleSplitINR(parameters)
    elif net_name == 'DoubleINR':
        # NOTE(review): the DoubleINR class is commented out in this module,
        # so this branch currently raises NameError - confirm before use.
        model = DoubleINR(parameters)
    else:
        raise ValueError('Do not support net name = ' + str(net))
    return model

class Composition(nn.Module):
    """Sequential composition of sub-networks built via get_nn.

    ``parameters`` is an ordered mapping of spec-name -> parameter tuple.
    A key containing 'norm' inserts a Normalization step, which at forward
    time rescales using the immediately preceding network in the chain.
    """

    def __init__(self, parameters):
        super().__init__()
        modules = []
        self.norm_index = []
        for idx, (spec, params) in enumerate(parameters.items()):
            if 'norm' in spec:
                # Remember where the normalization steps sit in the chain.
                self.norm_index.append(idx)
                modules.append(Normalization(params[0]))
            else:
                # Strip any trailing ' ...' disambiguation suffix from the key.
                modules.append(get_nn(spec.split(sep=' ')[0], params))
        self.net_list = nn.ModuleList(modules)

    def forward(self, x):
        for idx, stage in enumerate(self.net_list):
            # Normalization stages need the preceding network as first arg.
            x = stage(self.net_list[idx - 1], x) if idx in self.norm_index else stage(x)
        return x


class DoubleSplitINR(nn.Module):
    """Blend of two SplitINR branches: one driven by raw coordinates and one
    driven by (optionally UNet-transformed) per-coordinate features.

    ``parameters`` is a dict with keys 'splitinr_cor' and 'splitinr_feature'
    (each a SplitINR parameter list) plus optional 'feature_type'
    ('identity' | 'unet' | 'combine') and 'rff_dim'.
    """

    def __init__(self, parameters):
        super().__init__()
        self.dim_in = parameters['splitinr_cor'][0]
        # Step counter driving the blend-weight schedule in forward().
        self.t = 1
        # dict.get replaces the original try/except KeyError - same behavior.
        self.feature_type = parameters.get('feature_type', 'identity')
        self.rff_dim = parameters.get('rff_dim', 1)
        if self.feature_type == 'combine':
            # Widen the feature branch so the raw coordinate (rff_dim == 1) or
            # its random Fourier features (sin + cos, hence *2) can be
            # concatenated onto the UNet output.
            extra = self._extra_dims()
            parameters['splitinr_feature'][-1][0] += extra
            parameters['splitinr_feature'][-1][1] += extra
        self.netdict = nn.ModuleDict({
            'splitinr_cor': get_nn('splitinr_siren', parameters['splitinr_cor']),
            'splitinr_feature': get_nn('splitinr_siren', parameters['splitinr_feature'])})
        # Random Fourier feature projection matrix.
        # NOTE(review): not registered as a buffer, so it is excluded from
        # state_dict/.to(device); it is moved to the input device on demand.
        self.rffB = t.randn(1, self.rff_dim) / np.sqrt(self.rff_dim)
        if self.feature_type in ['unet', 'combine']:
            # Only the first two coordinates get a UNet feature transform.
            for i in range(2):
                num_input_channels = parameters['splitinr_cor'][2][i]
                if self.feature_type == 'combine':
                    # Undo the widening above to recover the UNet output width.
                    num_output_channels = parameters['splitinr_feature'][-1][i] - self._extra_dims()
                else:
                    num_output_channels = parameters['splitinr_feature'][-1][i]
                self.netdict['unet_' + str(i)] = UNet1d(num_input_channels=num_input_channels,
                                                        feature_scale=4,
                                                        num_output_channels=num_output_channels)

    def _extra_dims(self):
        """Number of channels appended to the feature branch in 'combine' mode."""
        return self.rff_dim if self.rff_dim == 1 else self.rff_dim * 2

    def feature_map(self, feature, i=0, x=None):
        """Transform one per-coordinate feature tensor (N x d).

        Coordinates with index i >= 2 are always passed through unchanged;
        otherwise the configured feature_type decides the transform.
        ``x`` is the raw coordinate, required only in 'combine' mode.
        """
        if self.feature_type == 'identity' or i >= 2:
            return feature
        elif self.feature_type == 'unet':
            return self.netdict['unet_' + str(i)](feature)
        elif self.feature_type == 'combine':
            unet_feature = self.netdict['unet_' + str(i)](feature)
            if self.rff_dim == 1:
                # Concatenate the raw coordinate with the learned feature.
                return t.cat((x, unet_feature), dim=1)
            # Random Fourier features of the coordinate + learned feature.
            B = self.rffB.to(x.device)
            return t.cat((t.sin(x @ B), t.cos(x @ B), unet_feature), dim=1)

    def forward(self, x):
        y_cor = self.netdict['splitinr_cor'](x)
        # NOTE(review): assumes the coordinate branch exposes per-coordinate
        # pre-activations via .pre - confirm against SplitINR.
        x_feature = [self.feature_map(self.netdict['splitinr_cor'].pre[i], i, x[i])
                     for i in range(self.dim_in)]
        self.x_feature = x_feature  # kept for external inspection
        y_feature = self.netdict['splitinr_feature'](x_feature)
        # Blend weight annealed toward 1 as steps accumulate. Two earlier
        # schedules (cyclic t%20/20 and constant 0.5) were dead assignments,
        # immediately overwritten, and have been removed.
        self.t += 1
        alpha = 1 - 1 / np.log(self.t)
        return (1 - alpha) * y_cor + alpha * y_feature



# NOTE(review): DoubleINR is still reachable from get_nn (net_name ==
# 'DoubleINR') but the class below is commented out, so selecting it raises
# NameError at runtime - re-enable this class or remove that branch.
# class DoubleINR(nn.Module):
#     def __init__(self,parameters):
#         super().__init__()
#         self.netdict = nn.ModuleDict({
#             'inr_cor':get_nn('inr_siren',parameters['inr_cor']),
#             'inr_feature':get_nn('inr_siren',parameters['inr_feature'])})
#         self.dim_in = parameters['inr_cor'][0]
#         self.t = 0
#         try:
#             self.feature_type = parameters['feature_type']
#         except KeyError:
#             self.feature_type = 'identity'
#         if self.feature_type == 'unet':
#             self.netdict['unet'] = UNet(self, num_input_channels=3, num_output_channels=3, 
#                                         feature_scale=4, more_layers=0, concat_x=False,
#                                         upsample_mode='deconv', pad='zero', norm_layer=nn.InstanceNorm2d, 
#                                         need_sigmoid=True, need_bias=True)

#     def feature_map(self,feature,i=0):
#         # feature \in N\times d
#         # N is the dimension of input
#         # d is the dimension of feature
#         if self.feature_type == 'identity' or i>=2:
#             return feature
#         elif self.feature_type == 'unet':
#             return self.netdict['unet'](feature)

#     def forward(self,x):
#         y_cor = self.netdict['inr_cor'](x)
#         x_feature = self.feature_map(y_cor)
#         y_feature = self.netdict['splitinr_feature'](x_feature)
#         alpha = self.t%20 /20.0
#         self.t += 1
#         return (1-alpha)*y_cor+alpha*y_feature



class NonLocal(nn.Module):
    """Placeholder for a non-local network; forward() is not implemented.

    Input is intended to be consistent with split_inr (translated from the
    original Chinese note).
    """

    def __init__(self):
        super().__init__()
        # BUG FIX: the original chained assignment
        # ``self.netdict = nn.ModuleDict=({...})`` rebound torch's
        # ``nn.ModuleDict`` name itself to a plain dict, clobbering the class
        # module-wide. A plain dict is kept here because the placeholder
        # values (0) are not Modules and would be rejected by ModuleDict.
        self.netdict = {
            'cor_net': 0,
            'u_net': 0,
            'cor_feature_net': 0}

    def forward(self, x):
        # Not implemented yet.
        pass


class Normalization(nn.Module):
    """Rescale a network's output toward [-1, 1].

    Evaluates ``net`` at the domain corners -xrange and +xrange and linearly
    maps ``net(x)`` so those corner outputs land at -1 and +1.
    """

    def __init__(self, xrange=1):
        super().__init__()
        self.xrange = xrange

    def forward(self, net, x):
        # Place the corner points on the same device as the network's weights.
        device = next(net.parameters()).device
        lo = (-t.ones((1, x.shape[1])) * self.xrange).to(device)
        hi = (t.ones((1, x.shape[1])) * self.xrange).to(device)
        y_gap = net(hi) - net(lo)
        return (net(x) - net(lo)) / y_gap * 2 - 1

class SplitMonoto(nn.Module):
    """One independent 1-D monotonic INR per input coordinate.

    Each coordinate x[i] is passed through its own INR, then linearly rescaled
    so the net's values at -1 and +1 map to -1 and +1 respectively. forward()
    returns the list of rescaled per-coordinate outputs.
    """

    def __init__(self, dim_in, dim_hidden, num_layers, act_name='tanh', monoto_mode=1):
        super().__init__()
        self.net_list = nn.ModuleList(
            INR(1, dim_hidden, 1, num_layers, activation=act_name, monoto_mode=monoto_mode)
            for _ in range(dim_in))

    def forward(self, x):
        device = x[0].device
        hi = t.ones(1, 1).to(device)
        lo = -t.ones(1, 1).to(device)
        outputs = []
        for i, net in enumerate(self.net_list):
            y_max = net(hi)
            y_min = net(lo)
            outputs.append((net(x[i]) - y_min) / (y_max - y_min) * 2 - 1)
        return outputs


