import numpy as np
import scipy
import scipy.spatial.distance
import scipy.special
import torch
import torch.nn as nn
from scipy.cluster.vq import kmeans

class AbstractDictionary(object):
    """Shared helper for dictionary classes.

    Subclasses must set ``self.n_dic_customized`` (number of trainable /
    customized dictionary functions) before calling :meth:`generate_B`.
    """

    def generate_B(self, inputs):
        """Build the matrix B that extracts the raw-data coordinates.

        The dictionary layout is [1, data, customized], so B has shape
        (n_dic_customized + target_dim + 1, target_dim): row 0 (the constant
        feature) and the trailing customized-feature rows are zero, while
        rows 1..target_dim form an identity over the data block.
        """
        target_dim = inputs.shape[-1]
        self.basis_func_number = self.n_dic_customized + target_dim + 1
        self.B = np.zeros((self.basis_func_number, target_dim))
        # Identity over the raw-data rows, offset by one for the constant.
        self.B[1:target_dim + 1, :] = np.eye(target_dim)
        return self.B

class DicNN(nn.Module):
    """Trainable dictionary network.

    Maps an input of width ``layer_sizes[0]`` to ``n_psi_train`` dictionary
    values via a bias-free linear lift followed by residual tanh blocks
    (one block per entry of ``layer_sizes``, all of the same width) and a
    final linear projection.  All computation is in double precision.
    """

    def __init__(self, layer_sizes=[64, 64], n_psi_train=22):
        super(DicNN, self).__init__()
        self.layer_sizes = layer_sizes
        self.n_psi_train = n_psi_train

        width = layer_sizes[0]
        # Bias-free linear lift; the input width stays unchanged, so the
        # input feature dimension must equal layer_sizes[0].
        self.input_layer = nn.Linear(width, width, bias=False, dtype=torch.double)
        # Residual blocks: len(layer_sizes) square layers of the same width.
        self.hidden_layers = nn.ModuleList(
            nn.Linear(width, width, dtype=torch.double) for _ in layer_sizes
        )
        self.output_layer = nn.Linear(width, n_psi_train, dtype=torch.double)
        self.activation = nn.Tanh()

    def forward(self, x):
        # Promote the input to double so it matches the layer dtypes.
        h = self.input_layer(x.double())
        for hidden in self.hidden_layers:
            h = h + self.activation(hidden(h))
        return self.output_layer(h)

class PsiNN(nn.Module, AbstractDictionary):
    """Full dictionary: concatenates constant, data and trainable parts as [1, data, DicNN]."""

    def __init__(self, dic_trainable=DicNN, layer_sizes=[64, 64], n_psi_train=22):
        super(PsiNN, self).__init__()
        self.layer_sizes = layer_sizes
        self.dic_trainable = dic_trainable
        self.n_dic_customized = n_psi_train
        # Instantiate the trainable portion of the dictionary.
        self.dicNN = self.dic_trainable(
            layer_sizes=self.layer_sizes,
            n_psi_train=self.n_dic_customized,
        )

    def forward(self, x):
        # Keep everything in double precision.
        x = x.double()
        trainable_part = self.dicNN(x)
        # Leading constant feature: a column of ones matching x's batch size.
        const_col = torch.ones_like(x[:, :1])
        return torch.cat([const_col, x, trainable_part], dim=-1)

class DicRBF(AbstractDictionary):
    """RBF dictionary based on notations in (https://en.wikipedia.org/wiki/Radial_basis_function).

    Produces features laid out as [1, data, rbf_1(x), ..., rbf_K(x)] where
    rbf_k(x) = r^2 * log(r + regularizer) with r = ||x - center_k||
    (a regularized thin-plate-spline basis; xlogy keeps r == 0 finite).
    """

    def __init__(self, rbf_number=100, regularizer=1e-4):
        # Number of RBF centers == number of customized dictionary functions.
        self.n_dic_customized = rbf_number
        self.regularizer = regularizer

    def build(self, data):
        """Choose RBF centers by k-means clustering of `data`."""
        # Accept torch tensors as well as numpy arrays.
        if isinstance(data, torch.Tensor):
            data = data.detach().cpu().numpy()
        # kmeans returns (centers, distortion); the distortion is unused.
        self.centers, _ = kmeans(data, self.n_dic_customized)

    def forward(self, data):
        """Evaluate the dictionary on `data`, returning a (n_samples, 1 + dim + K) double tensor."""
        if isinstance(data, torch.Tensor):
            data = data.detach().cpu().numpy()

        # Distances from every sample to every center in one call:
        # shape (n_samples, n_centers).  Computing the matrix directly keeps
        # sample-major ordering; the previous per-center stack followed by
        # .mT and reshape interleaved values from different centers.
        r = scipy.spatial.distance.cdist(data, self.centers)
        rbf_vals = scipy.special.xlogy(r ** 2, r + self.regularizer)

        rbfs = torch.tensor(rbf_vals, dtype=torch.double)
        ones = torch.ones((rbfs.shape[0], 1), dtype=torch.double)
        data_tensor = torch.tensor(data, dtype=torch.double)
        return torch.cat([ones, data_tensor, rbfs], dim=-1)
    
if __name__ == "__main__":
    # Seed both RNGs so the smoke test is reproducible.
    torch.manual_seed(42)
    np.random.seed(42)
    
    # Test parameters.
    batch_size = 32
    input_dim = 64  # must equal layer_sizes[0]: DicNN's input layer is square
    layer_sizes = [64, 64]
    n_psi_train = 22
    
    print("=== 网络结构分析 ===")
    
    # 1. Print the DicNN structure.
    print("\n1. DicNN 网络结构:")
    dic_nn = DicNN(layer_sizes=layer_sizes, n_psi_train=n_psi_train)
    print(dic_nn)
    
    # 2. Print the PsiNN structure.
    print("\n2. PsiNN 网络结构:")
    psi_nn = PsiNN(dic_trainable=DicNN, layer_sizes=layer_sizes, n_psi_train=n_psi_train)
    print(psi_nn)
    
    # 3. Print the DicRBF configuration (no trainable structure to show).
    print("\n3. DicRBF 结构:")
    rbf_number = 10
    dic_rbf = DicRBF(rbf_number=rbf_number)
    print(f"RBF中心点数量: {rbf_number}")
    
    print("\n=== 开始测试网络功能 ===")
    
    # 1. Test DicNN: output should be (batch_size, n_psi_train).
    print("\n1. 测试 DicNN:")
    test_input = torch.randn(batch_size, input_dim, dtype=torch.double)
    output = dic_nn(test_input)
    print(f"输入维度: {test_input.shape}")
    print(f"输出维度: {output.shape}")
    print(f"预期输出维度: [{batch_size}, {n_psi_train}]")
    assert output.shape == (batch_size, n_psi_train), "DicNN输出维度不正确"
    print("DicNN测试通过！")
    
    # 2. Test PsiNN: output is [1, data, DicNN] => 1 + input_dim + n_psi_train columns.
    print("\n2. 测试 PsiNN:")
    test_input = torch.randn(batch_size, input_dim, dtype=torch.double)
    output = psi_nn(test_input)
    expected_output_dim = 1 + input_dim + n_psi_train
    print(f"输入维度: {test_input.shape}")
    print(f"输出维度: {output.shape}")
    print(f"预期输出维度: [{batch_size}, {expected_output_dim}]")
    assert output.shape == (batch_size, expected_output_dim), "PsiNN输出维度不正确"
    print("PsiNN测试通过！")
    
    # 3. Test DicRBF: build picks centers via k-means, then forward evaluates
    #    [1, data, rbfs] => 1 + input_dim + rbf_number columns.
    print("\n3. 测试 DicRBF:")
    test_input = torch.randn(batch_size, input_dim, dtype=torch.double)
    dic_rbf.build(test_input)
    output = dic_rbf.forward(test_input)
    expected_output_dim = 1 + input_dim + rbf_number
    print(f"输入维度: {test_input.shape}")
    print(f"输出维度: {output.shape}")
    print(f"预期输出维度: [{batch_size}, {expected_output_dim}]")
    assert output.shape == (batch_size, expected_output_dim), "DicRBF输出维度不正确"
    print("DicRBF测试通过！")
    
    # 4. Report parameter counts and per-layer shapes for the trainable models.
    print("\n4. 测试网络参数:")
    print("\nDicNN参数统计:")
    total_params = sum(p.numel() for p in dic_nn.parameters())
    print(f"总参数量: {total_params}")
    print("各层参数:")
    for name, param in dic_nn.named_parameters():
        print(f"{name}: {param.shape}")
    
    print("\nPsiNN参数统计:")
    total_params = sum(p.numel() for p in psi_nn.parameters())
    print(f"总参数量: {total_params}")
    print("各层参数:")
    for name, param in psi_nn.named_parameters():
        print(f"{name}: {param.shape}")
    
    print("\n所有测试通过！网络结构和输出维度正确。")