import torch
import math
from lib.conv_kan.KANLinear import KANLinear
import lib.conv_kan.convolution as convolution


# Script containing the implementation of the convolution kernel with activation functions.
class KAN_Convolutional_Layer(torch.nn.Module):
    """2D convolutional layer whose kernels are learnable KAN functions.

    Builds one ``KAN_Convolution`` per (input channel, output channel) pair
    and combines them in ``forward`` via
    ``convolution.multiple_convs_kan_conv2d``.
    """

    def __init__(
            self,
            in_channels: int = 1,
            out_channels: int = 1,
            kernel_size: tuple = (2, 2),
            stride: tuple = (1, 1),
            padding: tuple = (0, 0),
            dilation: tuple = (1, 1),
            grid_size: int = 5,
            spline_order: int = 3,
            scale_noise: float = 0.1,
            scale_base: float = 1.0,
            scale_spline: float = 1.0,
            base_activation=torch.nn.SiLU,
            grid_eps: float = 0.02,
            grid_range: tuple = (-1, 1),  # tuple, not list: avoids a mutable default
            device: str = "cpu"
        ):
        """
        KAN convolutional layer with multiple convolutions.

        Args:
            in_channels (int): Number of input feature-map channels.
            out_channels (int): Number of output feature-map channels; one
                KAN kernel is created per (in, out) channel pair.
            kernel_size (tuple): Size of the kernel.
            stride (tuple): Stride of the convolution.
            padding (tuple): Zero-padding applied to the input.
            dilation (tuple): Spacing between kernel elements.
            grid_size (int): Number of grid intervals for the splines.
            spline_order (int): Order of the spline.
            scale_noise (float): Scale of the initialization noise.
            scale_base (float): Scale of the base weight.
            scale_spline (float): Scale of the spline weight.
            base_activation (torch.nn.Module): Base activation function class.
            grid_eps (float): Epsilon of the grid (forwarded to KANLinear).
            grid_range (tuple): Range covered by the spline grid.
            device (str): Kept for backward compatibility; the actual device
                is taken from the input tensor in ``forward``.
        """
        super(KAN_Convolutional_Layer, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.grid_size = grid_size
        self.spline_order = spline_order

        # One independent KAN kernel per (input channel, output channel) pair.
        self.convs = torch.nn.ModuleList(
            KAN_Convolution(
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
                grid_size=grid_size,
                spline_order=spline_order,
                scale_noise=scale_noise,
                scale_base=scale_base,
                scale_spline=scale_spline,
                base_activation=base_activation,
                grid_eps=grid_eps,
                grid_range=grid_range,
                # NOTE: device is intentionally not forwarded; it is resolved
                # from the input tensor at forward time (DDP-friendly).
            )
            for _ in range(in_channels * out_channels)
        )

    def forward(self, x: torch.Tensor):
        """Apply every KAN convolution to ``x`` and combine the results."""
        # Follow the device of the incoming data rather than a fixed device
        # (e.g. under DistributedDataParallel each replica sees its own GPU).
        self.device = x.device
        return convolution.multiple_convs_kan_conv2d(
            x,
            self.convs,
            self.kernel_size[0],
            self.out_channels,
            self.stride,
            self.dilation,
            self.padding,
            self.device,
        )
        

class KAN_Convolution(torch.nn.Module):
    """A single KAN convolution kernel.

    Wraps one ``KANLinear`` that maps a flattened ``kernel_size`` patch to a
    scalar; ``convolution.kan_conv2d`` slides it over the input.
    """

    def __init__(
            self,
            kernel_size: tuple = (2, 2),
            stride: tuple = (1, 1),
            padding: tuple = (0, 0),
            dilation: tuple = (1, 1),
            grid_size: int = 5,
            spline_order: int = 3,
            scale_noise: float = 0.1,
            scale_base: float = 1.0,
            scale_spline: float = 1.0,
            base_activation=torch.nn.SiLU,
            grid_eps: float = 0.02,
            grid_range: tuple = (-1, 1),  # tuple, not list: avoids a mutable default
            device="cpu"
        ):
        """
        Args:
            kernel_size (tuple): Size of the kernel.
            stride (tuple): Stride of the convolution.
            padding (tuple): Zero-padding applied to the input.
            dilation (tuple): Spacing between kernel elements.
            grid_size (int): Number of grid intervals for the spline.
            spline_order (int): Order of the spline.
            scale_noise (float): Scale of the initialization noise.
            scale_base (float): Scale of the base weight.
            scale_spline (float): Scale of the spline weight.
            base_activation (torch.nn.Module): Base activation function class.
            grid_eps (float): Epsilon of the grid (forwarded to KANLinear).
            grid_range (tuple): Range covered by the spline grid.
            device (str): Kept for backward compatibility; the actual device
                is taken from the input tensor in ``forward``.
        """
        super(KAN_Convolution, self).__init__()
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.grid_size = grid_size
        self.spline_order = spline_order
        # One KANLinear acts as the kernel: flattened patch -> scalar output.
        self.conv = KANLinear(
            in_features=math.prod(kernel_size),
            out_features=1,
            grid_size=grid_size,
            spline_order=spline_order,
            scale_noise=scale_noise,
            scale_base=scale_base,
            scale_spline=scale_spline,
            base_activation=base_activation,
            grid_eps=grid_eps,
            grid_range=grid_range,
        )

    def forward(self, x: torch.Tensor):
        """Convolve ``x`` with this KAN kernel."""
        # Resolve the device from the input tensor (DDP-friendly).
        self.device = x.device
        return convolution.kan_conv2d(
            x,
            self.conv,
            self.kernel_size[0],
            self.stride,
            self.dilation,
            self.padding,
            self.device,
        )

    def regularization_loss(self, regularize_activation=1.0, regularize_entropy=1.0):
        """Return the regularization loss of the underlying KANLinear.

        Bug fix: the original iterated over ``self.layers``, an attribute
        this class never defines (it would raise AttributeError); the class
        holds exactly one KANLinear in ``self.conv``, so delegate to it.
        """
        return self.conv.regularization_loss(regularize_activation, regularize_entropy)


