import os
import time
import math

import numpy as np

import torch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init

from utils.general_utils import get_expon_lr_func
from utils.system_utils import searchForMaxIteration
from scene.deformation import deform_network

class Embedder:
    """Fourier-feature positional encoder.

    Maps an input of dimension ``input_dim`` to the concatenation of the raw
    input (optional) and sin/cos features evaluated at log-spaced frequencies
    ``2**0 .. 2**(num_freqs-1)``.
    """

    def __init__(self, include_input=True, input_dim=3, num_freqs=10):
        self.include_input = include_input
        self.in_dim = input_dim
        self.num_freqs = num_freqs
        self.out_dim = 0

        self.create_embedding_fn()

    def create_embedding_fn(self):
        """Build the list of embedding callables and compute ``out_dim``."""
        fns = []
        dim = 0
        if self.include_input:
            # Pass the raw input through unchanged as the first feature group.
            fns.append(lambda x: x)
            dim += self.in_dim

        # Log-spaced frequency bands: 2^0, 2^1, ..., 2^(num_freqs-1).
        bands = 2.0 ** torch.linspace(0.0, self.num_freqs - 1, steps=self.num_freqs)

        for band in bands:
            # sin first, then cos, per frequency (order matters for callers
            # that rely on the feature layout).
            for periodic in (torch.sin, torch.cos):
                fns.append(lambda x, fn=periodic, w=band: fn(x * w))
            dim += 2 * self.in_dim

        self.embed_fns = fns
        self.out_dim = dim

    def embed(self, inputs):
        """Apply every embedding fn and concatenate along the last axis."""
        return torch.cat([fn(inputs) for fn in self.embed_fns], -1)

    @property
    def get_out_dim(self):
        # Property kept under its original name for backward compatibility.
        return self.out_dim

"""
#####-------------------------------------------------------------------------------
#####-------------------------------------------------------------------------------
#####-------------------------------------------------------------------------------
#####----------------------MLP for deformation--------------------------------------
#####-------------------------------------------------------------------------------
#####-------------------------------------------------------------------------------
#####-------------------------------------------------------------------------------
"""

# By default, deform position, scale and rotation; leave opacity and SH coefficients undeformed
class DeformMLP(nn.Module):
    """Coordinate MLP predicting per-Gaussian deformation deltas over time.

    Given positions and a time value, the network outputs additive deltas for
    position, scale and rotation. Opacity and SH deformation heads exist but
    are disabled by default and not returned by :meth:`forward`.

    Args:
        args: experiment/config namespace (accepted for interface
            compatibility with the other backends; not read here).
        D: number of hidden linear layers in the trunk.
        W: hidden layer width.
        input_ch: nominal input channel count (stored, currently unused).
        output_ch: nominal output channel count (stored, currently unused).
    """

    def __init__(self, args, D=8, W=256, input_ch=4, output_ch=10):
        super(DeformMLP, self).__init__()
        # Base parameters
        self.D = D
        self.W = W
        self.input_ch = input_ch
        self.output_ch = output_ch
        # Re-inject the embedded inputs at the middle of the trunk.
        self.skips = [D // 2]

        # Which Gaussian attributes receive a predicted delta.
        self.is_pos_defor = True
        self.is_sca_defor = True
        self.is_rot_defor = True
        self.is_opa_defor = False
        self.is_shs_defor = False

        # Lift the (embedded) scalar time through a small MLP before
        # concatenating it with the position features.
        self.tim_aug = True
        self.tim_out_dim = 30

        # Positional-encoding toggles and frequency counts per input.
        self.pos_is_eb = True
        self.tim_is_eb = True
        self.sca_is_eb = False
        self.rot_is_eb = False

        self.pos_pe = 10
        self.tim_pe = 2
        self.sca_pe = 2
        self.rot_pe = 2

        self.pos_embedder = Embedder(include_input=True, input_dim=3, num_freqs=self.pos_pe) if self.pos_is_eb else None
        self.tim_embedder = Embedder(include_input=True, input_dim=1, num_freqs=self.tim_pe) if self.tim_is_eb else None
        self.sca_embedder = Embedder(include_input=True, input_dim=3, num_freqs=self.sca_pe) if self.sca_is_eb else None
        self.rot_embedder = Embedder(include_input=True, input_dim=4, num_freqs=self.rot_pe) if self.rot_is_eb else None

        self.initialization()

    def initialization(self):
        """Build the time-augmentation net, the trunk, and the output heads."""
        self.time_input_ch = 1
        self.time_output_ch = 1

        if self.tim_aug:
            self.time_output_ch = 30
            self.tim_net = nn.Sequential(
                nn.Linear((self.time_input_ch if not self.tim_is_eb else self.tim_embedder.out_dim), 256),
                nn.ReLU(),
                nn.Linear(256, self.time_output_ch)
            )

        self.pos_input_ch = self.pos_embedder.out_dim if self.pos_embedder else 3
        # Trunk: first layer takes the concatenated (pos, time) features; the
        # layer right after a skip index takes the widened (features + hidden)
        # input produced by the skip concatenation in forward().
        self.linear = nn.ModuleList(
            [nn.Linear(self.pos_input_ch + self.time_output_ch, self.W)] + \
            [nn.Linear(self.W, self.W) if i not in self.skips else \
            nn.Linear(self.W + self.pos_input_ch + self.time_output_ch, self.W) \
            for i in range(self.D - 1)]
        )

        # One small head per deformable attribute (None when disabled).
        self.defor_pos = nn.Linear(self.W, 3) if self.is_pos_defor else None
        self.defor_sca = nn.Linear(self.W, 3) if self.is_sca_defor else None
        self.defor_rot = nn.Linear(self.W, 4) if self.is_rot_defor else None
        self.defor_opa = nn.Linear(self.W, 1) if self.is_opa_defor else None
        self.defor_shs = nn.Linear(self.W, 16) if self.is_shs_defor else None

    # Backward-compatible alias for the original (typo'd) method name.
    initializaion = initialization

    def forward(self, pos, tim):
        """Predict deformation deltas.

        Args:
            pos: position tensor; assumed (N, 3) to match the embedder —
                TODO confirm against callers.
            tim: time tensor; assumed (N, 1) — TODO confirm against callers.

        Returns:
            Tuple ``(d_pos, d_sca, d_rot)`` of additive deltas (each is
            ``None`` when the corresponding head is disabled).
        """
        pos_eb = pos
        tim_eb = tim
        if self.pos_is_eb:
            pos_eb = self.pos_embedder.embed(pos)
        if self.tim_is_eb:
            tim_eb = self.tim_embedder.embed(tim)

        if self.tim_aug:
            tim_eb = self.tim_net(tim_eb)

        h = torch.cat([pos_eb, tim_eb], -1)
        for idx, net in enumerate(self.linear):
            h = net(h)
            h = F.relu(h)
            if idx in self.skips:
                # Skip connection: re-inject the embedded inputs.
                h = torch.cat([pos_eb, tim_eb, h], -1)

        d_pos = self.defor_pos(h) if self.is_pos_defor else None
        d_sca = self.defor_sca(h) if self.is_sca_defor else None
        d_rot = self.defor_rot(h) if self.is_rot_defor else None
        # NOTE(review): opacity/SH deltas are computed when enabled but are
        # not returned — dead outputs unless the return is extended.
        d_opa = self.defor_opa(h) if self.is_opa_defor else None
        d_shs = self.defor_shs(h) if self.is_shs_defor else None
        return d_pos, d_sca, d_rot

"""
#####-------------------------------------------------------------------------------
#####-------------------------------------------------------------------------------
#####-------------------------------------------------------------------------------
#####----------------------MLP for deformation--------------------------------------
#####-------------------------------------------------------------------------------
#####-------------------------------------------------------------------------------
#####-------------------------------------------------------------------------------
"""




"""
#####-------------------------------------------------------------------------------
#####-------------------------------------------------------------------------------
#####-------------------------------------------------------------------------------
#####----------------------KAN for deformation--------------------------------------
#####-------------------------------------------------------------------------------
#####-------------------------------------------------------------------------------
#####-------------------------------------------------------------------------------
"""

import kan

class DeformKAN(nn.Module):
    """Kolmogorov-Arnold network predicting per-Gaussian deformation deltas.

    A single KAN maps the concatenated (position, time) input — width 4, so
    presumably (x, y, z, t) — to a 10-vector split into position, scale and
    rotation deltas.
    """

    def __init__(self, kan_kwargs):
        super(DeformKAN, self).__init__()
        device = torch.device("cuda")

        # NOTE(review): ``kan_kwargs`` is accepted but ignored — the
        # architecture below is hard-coded. Confirm whether it should be
        # forwarded to kan.KAN(**kan_kwargs).
        self.model = kan.KAN(width=[4, 9, 10], grid=2, k=1, seed=0, device=device)

    def forward(self, pos, tim):
        """Predict deformation deltas.

        Args:
            pos: position tensor; assumed (N, 3) — TODO confirm.
            tim: time tensor; assumed (N, 1) — TODO confirm.

        Returns:
            Tuple ``(d_pos, d_sca, d_rot)`` sliced from the 10-dim output as
            columns [0:3], [3:6], [6:10].
        """
        output = self.model(torch.cat([pos, tim], -1))
        return output[:, :3], output[:, 3:6], output[:, 6:]

    def get_parameters(self, position_lr_init, spatial_lr_scale):
        """Return optimizer param groups with the lr scaled by spatial extent."""
        l = [{'params': list(self.model.get_params()),
              'lr': position_lr_init * spatial_lr_scale,
              'name': 'deform'}]
        return l


"""
#####-------------------------------------------------------------------------------
#####-------------------------------------------------------------------------------
#####-------------------------------------------------------------------------------
#####----------------------KAN for deformation--------------------------------------
#####-------------------------------------------------------------------------------
#####-------------------------------------------------------------------------------
#####-------------------------------------------------------------------------------
"""






class DeformModule:
    """Facade that selects, trains and checkpoints one deformation backend.

    Supported values of ``args.use_deformation``:
      * ``'HexPlane'`` -> ``deform_network``
      * ``'MLP'``      -> ``DeformMLP``
      * ``'KAN'``      -> ``DeformKAN``
    """

    def __init__(self, args):
        # Map from config key to the backend class name (resolved via globals()).
        self.deformation_models = {'HexPlane': 'deform_network', 'MLP': 'DeformMLP', 'KAN': 'DeformKAN'}

        self.use_deformation = args.use_deformation
        self.deform = None

        if self.use_deformation in self.deformation_models:
            self.deform = globals()[self.deformation_models[self.use_deformation]](args)
        else:
            # BUGFIX: the original ``raise "..."`` raised a plain string,
            # which is itself a TypeError in Python 3 and hid the real error.
            raise ValueError(
                "Unknown deformation model {!r}; expected one of {}".format(
                    self.use_deformation, sorted(self.deformation_models)))

        self.optimizer = None
        self.args = args

        # Global parameters
        self.spatial_lr_scale = 5  # Important, spatial context by default

    def predict(self, means3D, scales, rotations, opacity, shs, time):
        """Apply the selected backend and return the deformed attributes.

        MLP/KAN backends predict additive deltas for position/scale/rotation
        and pass opacity/SH through unchanged; HexPlane returns the full
        deformed tuple itself.
        """
        if self.use_deformation == 'MLP' or self.use_deformation == 'KAN':
            d_pos, d_sca, d_rot = self.deform(means3D, time)
            return means3D + d_pos, scales + d_sca, rotations + d_rot, opacity, shs
        elif self.use_deformation == 'HexPlane':
            return self.deform(means3D, scales, rotations, opacity, shs, time)

    def cuda(self):
        """Move the backend network to the default CUDA device."""
        self.deform.cuda()

    def train_setup(self, training_args):
        """Build the Adam optimizer and exponential lr schedulers per backend."""
        if self.use_deformation == 'HexPlane':
            # Separate param groups (and schedulers) for the MLP part and the
            # HexPlane grid part.
            l = [{'params': list(self.deform.get_mlp_parameters()),
                  'lr': training_args.deformation_lr_init * self.spatial_lr_scale,
                  "name": "deformation"},
                {'params': list(self.deform.get_grid_parameters()),
                 'lr': training_args.grid_lr_init * self.spatial_lr_scale,
                 "name": "grid"}]

            self.optimizer = torch.optim.Adam(l, lr=0.0, eps=1e-15)

            self.deformation_scheduler_args = get_expon_lr_func(lr_init=training_args.deformation_lr_init*self.spatial_lr_scale,
                                                    lr_final=training_args.deformation_lr_final*self.spatial_lr_scale,
                                                    lr_delay_mult=training_args.deformation_lr_delay_mult,
                                                    max_steps=training_args.position_lr_max_steps)
            self.grid_scheduler_args = get_expon_lr_func(lr_init=training_args.grid_lr_init*self.spatial_lr_scale,
                                                        lr_final=training_args.grid_lr_final*self.spatial_lr_scale,
                                                        lr_delay_mult=training_args.deformation_lr_delay_mult,
                                                        max_steps=training_args.position_lr_max_steps)

        elif self.use_deformation == 'MLP':
            l = [{'params': list(self.deform.parameters()),
                  'lr': training_args.position_lr_init * self.spatial_lr_scale,
                  'name': 'deform'}]

            self.optimizer = torch.optim.Adam(l, lr=0.0, eps=1e-15)

            # NOTE(review): lr_final is not scaled by spatial_lr_scale here,
            # unlike lr_init and the HexPlane branch — confirm intentional.
            self.deform_scheduler_args = get_expon_lr_func(lr_init=training_args.position_lr_init*self.spatial_lr_scale,
                                                        lr_final=training_args.position_lr_final,
                                                        lr_delay_mult=training_args.position_lr_delay_mult,
                                                        max_steps=training_args.position_lr_max_steps)

        elif self.use_deformation == 'KAN':
            self.optimizer = torch.optim.Adam(self.deform.get_parameters(training_args.position_lr_init, self.spatial_lr_scale), lr=0.0, eps=1e-15)

            self.deform_scheduler_args = get_expon_lr_func(lr_init=training_args.position_lr_init*self.spatial_lr_scale,
                                                        lr_final=training_args.position_lr_final,
                                                        lr_delay_mult=training_args.position_lr_delay_mult,
                                                        max_steps=training_args.position_lr_max_steps)

    def state_dict(self):
        """Return the backend network's state dict."""
        return self.deform.state_dict()

    def load_state_dict(self, state):
        """Load a previously saved backend state dict."""
        self.deform.load_state_dict(state)

    def save_weights(self, model_path, iteration):
        """Save backend weights to <model_path>/deform/iteration_<it>/deform.pth."""
        out_weights_path = os.path.join(model_path, "deform/iteration_{}".format(iteration))
        os.makedirs(out_weights_path, exist_ok=True)
        torch.save(self.deform.state_dict(), os.path.join(out_weights_path, 'deform.pth'))

    def load_weights(self, model_path, iteration=-1):
        """Load backend weights; iteration=-1 picks the latest checkpoint."""
        if iteration == -1:
            loaded_iter = searchForMaxIteration(os.path.join(model_path, "deform"))
        else:
            loaded_iter = iteration
        weights_path = os.path.join(model_path, "deform/iteration_{}/deform.pth".format(loaded_iter))
        self.deform.load_state_dict(torch.load(weights_path))

    def update_learning_rate(self, iteration):
        """Step each param group's lr from its scheduler at this iteration.

        Group names are mutually exclusive, so a single if/elif chain is
        equivalent to the original separate checks.
        """
        for param_group in self.optimizer.param_groups:
            if param_group["name"] == "deform":
                param_group['lr'] = self.deform_scheduler_args(iteration)
            elif param_group["name"] == "grid":
                param_group['lr'] = self.grid_scheduler_args(iteration)
            elif param_group["name"] == "deformation":
                param_group['lr'] = self.deformation_scheduler_args(iteration)




        



        




