"""
教学版本的切比雪夫多项式KAN层实现
包含详细的注释和步骤说明，便于理解KAN网络的工作原理
"""

import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt

class EducationalChebyKANLinear(nn.Module):
    """
    Educational Chebyshev-polynomial KAN layer.

    Core idea of KAN networks:
    - Traditional MLP: fixed activation functions applied at the nodes.
    - KAN: learnable activation functions placed on the edges.

    Why Chebyshev polynomials:
    - numerically stable to evaluate
    - computationally efficient
    - strong function-approximation power
    """

    def __init__(self, input_dim, output_dim, degree=3):
        """
        Initialize the Chebyshev KAN layer.

        Args:
            input_dim (int): input dimensionality
            output_dim (int): output dimensionality
            degree (int): highest degree of the Chebyshev polynomials
        """
        super(EducationalChebyKANLinear, self).__init__()

        self.input_dim = input_dim
        self.output_dim = output_dim
        self.degree = degree

        # Learnable Chebyshev coefficients.
        # Shape: (input_dim, output_dim, degree+1) — every input/output pair
        # gets its own set of degree+1 coefficients.
        self.cheby_coeffs = nn.Parameter(
            torch.empty(input_dim, output_dim, degree + 1)
        )

        # Small-variance init so the initial mapping starts near zero.
        nn.init.normal_(
            self.cheby_coeffs,
            mean=0.0,
            std=1.0 / (input_dim * (degree + 1))
        )

        # Degree sequence [0, 1, 2, ..., degree], registered as a buffer so
        # it follows the module across .to(device) calls.
        self.register_buffer(
            "degree_range",
            torch.arange(0, degree + 1, dtype=torch.float32)
        )

        print(f"创建KAN层: {input_dim} -> {output_dim}, 度数={degree}")
        print(f"切比雪夫系数形状: {self.cheby_coeffs.shape}")

    def chebyshev_polynomials(self, x):
        """
        Evaluate the Chebyshev polynomials T_0 .. T_degree at x.

        Definition: T_n(x) = cos(n * arccos(x)), x in [-1, 1]

        Args:
            x (torch.Tensor): input tensor of shape (batch_size, input_dim, 1)

        Returns:
            torch.Tensor: polynomial values, shape (batch_size, input_dim, degree+1)
        """
        # Step 1: squash the input into the open interval (-1, 1) with tanh so
        # arccos is well defined.  BUGFIX: the original applied tanh twice
        # (tanh(tanh(x))), which needlessly compresses the input range — a
        # single tanh already lands strictly inside acos's domain.
        x_normalized = torch.tanh(x)

        # Step 2: theta = arccos(x)
        theta = torch.acos(x_normalized)

        # Step 3: n * arccos(x) for n = [0, 1, ..., degree].
        # Broadcasting: theta is (batch, input_dim, 1), degree_range is
        # (degree+1,), so the product is (batch, input_dim, degree+1).
        n_theta = theta * self.degree_range

        # Step 4: cos(n * arccos(x)) = T_n(x)
        chebyshev_values = torch.cos(n_theta)

        return chebyshev_values

    def forward(self, x):
        """
        Forward pass.

        Args:
            x (torch.Tensor): input tensor of shape (batch_size, input_dim)

        Returns:
            torch.Tensor: output tensor of shape (batch_size, output_dim)
        """
        # Unpacking doubles as a cheap shape check: input must be 2-D.
        batch_size, input_dim = x.shape

        # Step 1: add a trailing axis so the polynomial degrees can broadcast.
        # (batch_size, input_dim) -> (batch_size, input_dim, 1)
        x_expanded = x.unsqueeze(-1)

        # Step 2: evaluate the basis.
        # Result shape: (batch_size, input_dim, degree+1)
        cheby_values = self.chebyshev_polynomials(x_expanded)

        # Step 3: linearly combine the basis values with the learned
        # coefficients via Einstein summation:
        #   cheby_values: (batch, input_dim, degree+1)  -> 'bid'
        #   cheby_coeffs: (input_dim, output_dim, degree+1) -> 'iod'
        #   output:       (batch, output_dim)           -> 'bo'
        output = torch.einsum('bid,iod->bo', cheby_values, self.cheby_coeffs)

        return output

    def visualize_basis_functions(self, input_idx=0, output_idx=0):
        """
        Plot the Chebyshev basis functions and their learned-coefficient
        weighted versions for one input/output pair.

        Args:
            input_idx (int): input dimension index
            output_idx (int): output dimension index
        """
        x = torch.linspace(-1, 1, 1000)

        plt.figure(figsize=(12, 8))

        # Draw each polynomial (top panel) and its weighted copy (bottom).
        for n in range(self.degree + 1):
            T_n = torch.cos(n * torch.acos(x))
            coeff = self.cheby_coeffs[input_idx, output_idx, n].item()

            plt.subplot(2, 1, 1)
            plt.plot(x.numpy(), T_n.numpy(), label=f'T_{n}(x)', linewidth=2)

            plt.subplot(2, 1, 2)
            plt.plot(x.numpy(), coeff * T_n.numpy(),
                    label=f'{coeff:.3f} * T_{n}(x)', linewidth=2)

        plt.subplot(2, 1, 1)
        plt.title('切比雪夫多项式基函数')
        plt.xlabel('x')
        plt.ylabel('T_n(x)')
        plt.legend()
        plt.grid(True, alpha=0.3)

        plt.subplot(2, 1, 2)
        plt.title(f'加权的切比雪夫多项式 (输入{input_idx} -> 输出{output_idx})')
        plt.xlabel('x')
        plt.ylabel('系数 * T_n(x)')
        plt.legend()
        plt.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.show()

    def get_learned_function(self, input_idx=0, output_idx=0, x_range=(-2, 2), num_points=1000):
        """
        Sample the learned 1-D edge function for one input/output pair.

        Args:
            input_idx (int): input dimension index
            output_idx (int): output dimension index
            x_range (tuple): range of x values to sample
            num_points (int): number of sample points

        Returns:
            tuple: (x values, corresponding function values) as numpy arrays
        """
        x = torch.linspace(x_range[0], x_range[1], num_points)

        # Feed x through a single input dimension, all others held at zero,
        # so the output isolates this edge's contribution.
        input_tensor = torch.zeros(num_points, self.input_dim)
        input_tensor[:, input_idx] = x

        with torch.no_grad():
            output = self.forward(input_tensor)
            function_values = output[:, output_idx]

        return x.numpy(), function_values.numpy()

def demonstrate_educational_kan():
    """Walk through the main features of the educational KAN layer."""
    print("=== 教学版切比雪夫KAN层演示 ===\n")

    # Build a small 2-in / 1-out layer with degree-4 polynomials.
    layer = EducationalChebyKANLinear(input_dim=2, output_dim=1, degree=4)

    # Forward a batch of random inputs to illustrate the tensor shapes.
    sample = torch.randn(10, 2)
    print(f"测试输入形状: {sample.shape}")

    with torch.no_grad():
        result = layer(sample)
    print(f"输出形状: {result.shape}")
    print(f"输出值: {result.flatten()}")

    # Plot the Chebyshev basis for the first input/output pair.
    print("\n可视化切比雪夫基函数...")
    layer.visualize_basis_functions(input_idx=0, output_idx=0)

    # Fit a simple target to show that the layer actually learns.
    print("\n训练KAN层学习 sin(x) 函数...")
    train_kan_to_learn_function(layer)

    return layer

def train_kan_to_learn_function(kan_layer):
    """
    Train the given KAN layer to approximate sin(x) on [-2, 2], then plot
    the loss curve, the fit, and the learned coefficients.

    Args:
        kan_layer: KAN layer instance
    """
    # Training data: sweep x over [-2, 2]; only the first input dimension
    # carries the signal, every other dimension stays zero.
    xs = torch.linspace(-2, 2, 1000).unsqueeze(1)
    features = torch.zeros(1000, kan_layer.input_dim)
    features[:, 0] = xs.flatten()

    # Target function: sin(x)
    targets = torch.sin(xs)

    # Optimizer and loss.
    optimizer = torch.optim.Adam(kan_layer.parameters(), lr=0.01)
    criterion = nn.MSELoss()

    # Optimization loop.
    losses = []
    for epoch in range(500):
        optimizer.zero_grad()
        loss = criterion(kan_layer(features), targets)
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
        if epoch % 100 == 0:
            print(f"Epoch {epoch}, Loss: {loss.item():.6f}")

    # Evaluate on a wider interval to also show extrapolation behavior.
    with torch.no_grad():
        grid = torch.linspace(-3, 3, 1000)
        eval_features = torch.zeros(1000, kan_layer.input_dim)
        eval_features[:, 0] = grid
        prediction = kan_layer(eval_features)
        ground_truth = torch.sin(grid)

    plt.figure(figsize=(15, 5))

    # Panel 1: training loss.
    plt.subplot(1, 3, 1)
    plt.plot(losses)
    plt.title('训练损失')
    plt.xlabel('Epoch')
    plt.ylabel('MSE Loss')
    plt.grid(True, alpha=0.3)

    # Panel 2: fitted curve vs. ground truth.
    plt.subplot(1, 3, 2)
    plt.plot(grid.numpy(), ground_truth.numpy(), 'b-', label='真实 sin(x)', linewidth=2)
    plt.plot(grid.numpy(), prediction.numpy(), 'r--', label='KAN预测', linewidth=2)
    plt.title('函数拟合结果')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.legend()
    plt.grid(True, alpha=0.3)

    # Panel 3: learned Chebyshev coefficients of the (0, 0) edge.
    plt.subplot(1, 3, 3)
    learned = kan_layer.cheby_coeffs[0, 0, :].detach().numpy()
    plt.bar(range(len(learned)), learned)
    plt.title('学习到的切比雪夫系数')
    plt.xlabel('多项式次数')
    plt.ylabel('系数值')
    plt.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.show()

if __name__ == "__main__":
    # 运行演示
    kan_layer = demonstrate_educational_kan()
    
    print("\n=== 演示完成 ===")
    print("通过这个例子，您可以看到：")
    print("1. KAN层如何使用切比雪夫多项式作为基函数")
    print("2. 可学习的系数如何组合这些基函数")
    print("3. KAN层如何学习复杂的非线性函数")
    print("4. 相比传统MLP，KAN层具有更强的表达能力")