import math

import torch
import torch.nn.functional as F

class KANLinear(torch.nn.Module):
    """KAN (Kolmogorov-Arnold Network) linear layer.

    Combines a conventional linear transform with a learned spline
    transform built on B-spline basis functions:

        y = W_base @ act(x) + W_spline @ B(x)

    where ``B(x)`` are the B-spline basis values of the input. Compared to a
    plain ``nn.Linear``, each connection can learn a flexible 1-D
    nonlinearity, giving the layer stronger expressive power.

    Key properties:
      1. B-spline basis functions provide the nonlinear component.
      2. The output is the sum of a base linear path and a spline path.
      3. The spline grid can be re-fit to the data distribution at training
         time via :meth:`update_grid`.
    """
    def __init__(
            self,
            in_features,                              # input feature dimension
            out_features,                             # output feature dimension
            grid_size=5,                              # number of grid intervals (spline capacity)
            spline_order=3,                           # B-spline order (smoothness)
            scale_noise=0.1,                         # scale of the init noise for spline weights
            scale_base=1.0,                          # scale for base-weight init
            scale_spline=1.0,                        # scale for spline weights
            enable_standalone_scale_spline=True,     # learn a separate spline scaler per connection
            base_activation=torch.nn.SiLU,           # activation class for the base path
            grid_eps=0.02,                           # uniform/adaptive blend factor for grid updates
            grid_range=[-1, 1],                      # initial grid range
    ):
        """Initialize the KAN linear layer.

        Args:
            in_features: Number of input features.
            out_features: Number of output features.
            grid_size: Number of grid intervals; controls spline capacity.
            spline_order: B-spline order; controls smoothness.
            scale_noise: Scale of the random noise used to initialize the
                spline weights.
            scale_base: Scale applied when initializing the base weights.
            scale_spline: Scale for the spline weights (applied directly
                when ``enable_standalone_scale_spline`` is False).
            enable_standalone_scale_spline: If True, learn a separate
                per-connection scaler for the spline weights.
            base_activation: Activation *class* (not instance) used on the
                base linear path.
            grid_eps: Blend factor between the uniform and the adaptive grid
                in :meth:`update_grid` (1.0 = fully uniform).
            grid_range: Initial ``[min, max]`` range covered by the grid.
        """
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.grid_size = grid_size
        self.spline_order = spline_order

        # Step size of the uniform grid over the requested range.
        h = (grid_range[1] - grid_range[0]) / grid_size
        # Knot vector, extended by `spline_order` knots on each side so the
        # B-spline recursion is well-defined at the domain boundaries;
        # replicated once per input feature.
        grid = (
            (
                    torch.arange(-spline_order, grid_size + spline_order + 1) * h
                    + grid_range[0]
            )
            .expand(in_features, -1)
            .contiguous()
        )
        # The grid is persistent state, not a trainable parameter.
        self.register_buffer("grid", grid)

        # Base weights for the conventional linear path.
        # torch.empty replaces the legacy torch.Tensor(shape) constructor;
        # values are written by reset_parameters() below.
        self.base_weight = torch.nn.Parameter(torch.empty(out_features, in_features))
        # Per-connection spline coefficients.
        self.spline_weight = torch.nn.Parameter(
            torch.empty(out_features, in_features, grid_size + spline_order)
        )
        # Optional learnable per-connection scaler for the spline path.
        if enable_standalone_scale_spline:
            self.spline_scaler = torch.nn.Parameter(
                torch.empty(out_features, in_features)
            )

        # Remaining hyperparameters.
        self.scale_noise = scale_noise
        self.scale_base = scale_base
        self.scale_spline = scale_spline
        self.enable_standalone_scale_spline = enable_standalone_scale_spline
        self.base_activation = base_activation()
        self.grid_eps = grid_eps

        self.reset_parameters()

    def reset_parameters(self):
        """Reset all learnable parameters.

        Base weights get a Kaiming-uniform init; spline weights are fit (via
        least squares) to small random noise so the spline path starts close
        to zero.
        """
        # Kaiming-uniform init for the base linear weights.
        torch.nn.init.kaiming_uniform_(self.base_weight, a=math.sqrt(5) * self.scale_base)
        with torch.no_grad():
            # Small uniform noise in [-0.5, 0.5), scaled down with the grid
            # size; sampled at the interior grid points. Pinned to the
            # grid's device so re-initialization after .to(...) stays valid.
            noise = (
                    (
                            torch.rand(
                                self.grid_size + 1,
                                self.in_features,
                                self.out_features,
                                device=self.grid.device,
                            )
                            - 1 / 2
                    )
                    * self.scale_noise
                    / self.grid_size
            )
            # Convert the noisy curve samples into spline coefficients.
            self.spline_weight.data.copy_(
                (self.scale_spline if not self.enable_standalone_scale_spline else 1.0)
                * self.curve_to_coeff(
                    self.grid.T[self.spline_order: -self.spline_order],
                    noise,
                )
            )
            # If the standalone scaler exists, initialize it too.
            if self.enable_standalone_scale_spline:
                torch.nn.init.kaiming_uniform_(self.spline_scaler, a=math.sqrt(5) * self.scale_spline)

    def b_splines(self, x: torch.Tensor):
        """Evaluate the B-spline basis functions at ``x``.

        Uses the Cox-de Boor recursion: order-0 indicator functions over the
        knot intervals are refined ``spline_order`` times.

        Args:
            x: Input tensor of shape ``(batch_size, in_features)``.

        Returns:
            Basis values of shape
            ``(batch_size, in_features, grid_size + spline_order)``.
        """
        assert x.dim() == 2 and x.size(1) == self.in_features
        grid: torch.Tensor = self.grid  # (in_features, n_knots)
        x = x.unsqueeze(-1)
        # Order 0: indicator of the half-open knot interval containing x.
        bases = ((x >= grid[:, :-1]) & (x < grid[:, 1:])).to(x.dtype)
        # Cox-de Boor recursion up to the requested spline order.
        for k in range(1, self.spline_order + 1):
            bases = (
                            (x - grid[:, : -(k + 1)])
                            / (grid[:, k:-1] - grid[:, : -(k + 1)])
                            * bases[:, :, :-1]
                    ) + (
                            (grid[:, k + 1:] - x)
                            / (grid[:, k + 1:] - grid[:, 1:(-k)])
                            * bases[:, :, 1:]
                    )
        assert bases.size() == (
            x.size(0),
            self.in_features,
            self.grid_size + self.spline_order,
        )
        return bases.contiguous()

    def curve_to_coeff(self, x: torch.Tensor, y: torch.Tensor):
        """Fit spline coefficients to the curve samples ``(x, y)``.

        Solves a batched least-squares problem so the spline defined by the
        returned coefficients (approximately) interpolates the given values.

        Args:
            x: Sample points, shape ``(batch_size, in_features)``.
            y: Curve values, shape ``(batch_size, in_features, out_features)``.

        Returns:
            Coefficients of shape
            ``(out_features, in_features, grid_size + spline_order)``.
        """
        assert x.dim() == 2 and x.size(1) == self.in_features
        assert y.size() == (x.size(0), self.in_features, self.out_features)
        # Design matrix of basis values: (in_features, batch, n_coeffs).
        A = self.b_splines(x).transpose(0, 1)
        # Targets: (in_features, batch, out_features).
        B = y.transpose(0, 1)
        # Batched least-squares solve, one system per input feature.
        solution = torch.linalg.lstsq(A, B).solution
        result = solution.permute(2, 0, 1)
        assert result.size() == (
            self.out_features,
            self.in_features,
            self.grid_size + self.spline_order,
        )
        return result.contiguous()

    @property
    def scaled_spline_weight(self):
        """Spline weights, scaled by the standalone scaler when enabled.

        Returns:
            ``spline_weight * spline_scaler`` (broadcast over the coefficient
            axis) if ``enable_standalone_scale_spline``, else the raw
            ``spline_weight``.
        """
        return self.spline_weight * (
            self.spline_scaler.unsqueeze(-1)
            if self.enable_standalone_scale_spline
            else 1.0
        )

    def forward(self, x: torch.Tensor):
        """Apply the KAN transform.

        The output is the sum of the base path (activation followed by a
        linear map) and the spline path.

        Args:
            x: Input tensor of shape ``(..., in_features)``.

        Returns:
            Output tensor of shape ``(..., out_features)``.
        """
        assert x.size(-1) == self.in_features
        # Remember the leading dimensions so they can be restored at the end.
        original_shape = x.shape
        # Flatten leading dims: both paths operate on 2-D input.
        x = x.reshape(-1, self.in_features)
        # Base path: activation then linear map.
        base_output = F.linear(self.base_activation(x), self.base_weight)
        # Spline path: B-spline basis values times flattened coefficients.
        spline_output = F.linear(
            self.b_splines(x).view(x.size(0), -1),
            self.scaled_spline_weight.view(self.out_features, -1),
        )
        output = base_output + spline_output
        # Restore all leading dimensions (last dim becomes out_features).
        output = output.reshape(*original_shape[:-1], self.out_features)
        return output

    @torch.no_grad()
    def update_grid(self, x: torch.Tensor, margin=0.01):
        """Re-fit the spline grid to the distribution of ``x``.

        A new grid is built as a blend (controlled by ``grid_eps``) of a
        uniform grid over the observed range and a quantile-based adaptive
        grid. The spline weights are then re-fit so the layer's spline
        output on this batch is preserved.

        Args:
            x: Input batch, shape ``(batch_size, in_features)``.
            margin: Extra margin added on both ends of the observed range.
        """
        assert x.dim() == 2 and x.size(1) == self.in_features
        batch = x.size(0)
        # Current spline output per sample, kept to re-fit the weights
        # against the new grid afterwards.
        splines = self.b_splines(x)
        splines = splines.permute(1, 0, 2)            # (in, batch, n_coeffs)
        orig_coeff = self.scaled_spline_weight
        orig_coeff = orig_coeff.permute(1, 2, 0)      # (in, n_coeffs, out)
        unreduced_spline_output = torch.bmm(splines, orig_coeff)
        unreduced_spline_output = unreduced_spline_output.permute(1, 0, 2)  # (batch, in, out)
        # Adaptive grid: evenly spaced sample quantiles per feature.
        x_sorted = torch.sort(x, dim=0)[0]
        grid_adaptive = x_sorted[
            torch.linspace(
                0, batch - 1, self.grid_size + 1, dtype=torch.int64, device=x.device
            )
        ]
        # Uniform grid over the margin-extended observed range.
        uniform_step = (x_sorted[-1] - x_sorted[0] + 2 * margin) / self.grid_size
        grid_uniform = (
                torch.arange(
                    # Match the input dtype (the original hard-coded float32,
                    # silently downcasting double inputs).
                    self.grid_size + 1, dtype=x.dtype, device=x.device
                ).unsqueeze(1)
                * uniform_step
                + x_sorted[0]
                - margin
        )
        # Blend the uniform and adaptive grids.
        grid = self.grid_eps * grid_uniform + (1 - self.grid_eps) * grid_adaptive
        # Extend by spline_order knots on each side for boundary support.
        grid = torch.cat(
            [
                grid[:1]
                - uniform_step
                * torch.arange(self.spline_order, 0, -1, device=x.device).unsqueeze(1),
                grid,
                grid[-1:]
                + uniform_step
                * torch.arange(1, self.spline_order + 1, device=x.device).unsqueeze(1),
            ],
            dim=0,
        )
        # Install the new grid and re-fit the spline weights so outputs on
        # this batch are preserved.
        self.grid.copy_(grid.T)
        self.spline_weight.data.copy_(self.curve_to_coeff(x, unreduced_spline_output))