import numpy as np
import scipy
from scipy.cluster.vq import kmeans
from scipy.spatial.distance import cdist
from scipy.special import xlogy

import mindspore as ms
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor
from mindspore import context
from mindspore import Parameter

# Configure the MindSpore execution context: PyNative (eager) mode on GPU.
# NOTE(review): the original comment claimed float64 precision is enabled here,
# but no dtype is configured anywhere in this call — confirm intent.
context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
# True when the configured backend is GPU (not referenced elsewhere in this chunk).
use_gpu = context.get_context(attr_key='device_target') == "GPU"

class AbstractDictionary(object):
    """Mixin that builds the fixed output-selection matrix B for a dictionary.

    B has shape (1 + target_dim + n_dic_customized, target_dim): a zero row
    for the constant feature, an identity block selecting the raw data
    coordinates, and zero rows for the trainable dictionary outputs.
    """

    def generate_B(self, inputs):
        """Build and cache B from the last dimension of ``inputs``."""
        dim = inputs.shape[-1]
        # Total basis functions: constant (1) + raw data (dim) + custom dictionary.
        self.basis_func_number = self.n_dic_customized + dim + 1
        self.B = np.zeros((self.basis_func_number, dim))
        # Rows 1..dim form an identity block that picks out the raw data features.
        self.B[1:dim + 1, :] = np.eye(dim)
        return self.B

class DicNN(nn.Cell):
    """Trainable dictionary network.

    Architecture: a bias-free input Dense layer, then ``len(layer_sizes)``
    residual hidden Dense layers with tanh activations, then a final Dense
    projection down to ``n_psi_train`` dictionary features.

    NOTE(review): only ``layer_sizes[0]`` is used as the width of every
    layer; the remaining entries merely set how many hidden layers exist.
    Confirm this matches the intended design.
    """

    def __init__(self, layer_sizes=(64, 64), n_psi_train=22):
        # Immutable tuple default avoids the shared-mutable-default-argument
        # pitfall; passing a list still behaves identically.
        super(DicNN, self).__init__()
        self.layer_sizes = layer_sizes
        self.n_psi_train = n_psi_train

        width = layer_sizes[0]

        # Input projection (no bias term).
        self.input_layer = nn.Dense(width, width,
                                    weight_init='xavier_uniform',
                                    has_bias=False)

        # Residual hidden layers, one per entry of layer_sizes.
        self.hidden_layers = nn.CellList()
        for _ in range(len(layer_sizes)):
            self.hidden_layers.append(
                nn.Dense(width, width,
                         weight_init='xavier_uniform',
                         bias_init='zeros',
                         has_bias=True))

        # Final projection to the trainable dictionary features.
        self.output_layer = nn.Dense(width, n_psi_train,
                                     weight_init='xavier_uniform',
                                     bias_init='zeros',
                                     has_bias=True)

        self.activation = nn.Tanh()
        self.cast = ops.Cast()

    def construct(self, x):
        """Map (batch, layer_sizes[0]) inputs to (batch, n_psi_train) features."""
        x = self.input_layer(x)
        for layer in self.hidden_layers:
            # Residual connection around each tanh hidden layer.
            x = x + self.activation(layer(x))
        x = self.output_layer(x)
        return x

class PsiNN(nn.Cell, AbstractDictionary):
    """Concatenate constant, data and trainable dictionaries together as [1, data, DicNN]"""

    def __init__(self, dic_trainable=DicNN, layer_sizes=(64, 64), n_psi_train=22):
        # Immutable tuple default avoids the shared-mutable-default-argument
        # pitfall; passing a list still behaves identically.
        super(PsiNN, self).__init__()
        self.layer_sizes = layer_sizes
        self.dic_trainable = dic_trainable
        self.n_dic_customized = n_psi_train
        # Instantiate the trainable dictionary sub-network.
        self.dicNN = self.dic_trainable(
            layer_sizes=self.layer_sizes,
            n_psi_train=self.n_dic_customized)

        # MindSpore primitive ops used in construct().
        self.ones_like = ops.OnesLike()
        self.concat = ops.Concat(axis=-1)
        self.cast = ops.Cast()

    def construct(self, x):
        """Return [1, x, dicNN(x)]: shape (batch, 1 + x.shape[-1] + n_psi_train)."""
        # Constant feature: a column of ones matching the batch dimension.
        constant = self.ones_like(x[:, :1])
        psi_x_train = self.dicNN(x)
        outputs = self.concat((constant, x, psi_x_train))
        return outputs

class DicRBF(AbstractDictionary):
    """RBF based on notations in (https://en.wikipedia.org/wiki/Radial_basis_function)"""

    def __init__(self, rbf_number=100, regularizer=1e-4):
        self.n_dic_customized = rbf_number  # number of RBF centers
        self.regularizer = regularizer      # added to r inside the log to avoid log(0)
        self.centers = None                 # set by build()

    def build(self, data):
        """Choose RBF centers via k-means clustering of ``data``.

        Must be called before construct(). Accepts a MindSpore Tensor or an
        array-like; data is converted to a NumPy array for scipy's kmeans.
        """
        if isinstance(data, Tensor):
            data = data.asnumpy()
        # kmeans returns (centers, distortion); only the centers are kept.
        self.centers, _ = kmeans(data, self.n_dic_customized)

    def construct(self, data):
        """Return [1, data, rbf(data)] as a float32 MindSpore tensor.

        The kernel is the thin-plate-spline-style r^2 * log(r + regularizer),
        evaluated against every k-means center.

        Raises:
            ValueError: if build() has not been called yet.
        """
        # Normalize the input to a float32 NumPy array.
        if isinstance(data, Tensor):
            data_np = data.asnumpy().astype(np.float32)
        else:
            data_np = np.array(data, dtype=np.float32)

        if self.centers is None:
            raise ValueError("RBF centers not initialized. Please call build() first.")

        # All sample-to-center distances in one vectorized call: shape (N, K).
        # (Replaces the original per-center Python loop; values are identical.)
        r = cdist(data_np, self.centers)
        # xlogy computes x*log(y) and handles x == 0 gracefully.
        rbfs_array = xlogy(r ** 2, r + self.regularizer).astype(np.float32)
        rbfs = Tensor(rbfs_array, dtype=ms.float32)

        # Constant column and the raw data, both float32.
        ones = ops.Ones()((rbfs.shape[0], 1), ms.float32)
        data_tensor = Tensor(data_np, dtype=ms.float32)

        # Concatenate as [1, data, rbf features].
        concat = ops.Concat(axis=-1)
        results = concat((ones, data_tensor, rbfs))
        return results
    
if __name__ == "__main__":
    # Fix the RNG seed so runs are reproducible.
    np.random.seed(42)

    # Test configuration.
    batch_size = 32
    input_dim = 64  # input feature width (matches layer_sizes[0])
    layer_sizes = [64, 64]
    n_psi_train = 22

    def fresh_batch():
        """Draw a new random float32 batch of shape (batch_size, input_dim)."""
        return Tensor(np.random.randn(batch_size, input_dim), dtype=ms.float32)

    def check_shape(inp, out, expected_cols, label):
        """Print observed/expected shapes and assert they agree."""
        print(f"输入维度: {inp.shape}")
        print(f"输出维度: {out.shape}")
        print(f"预期输出维度: [{batch_size}, {expected_cols}]")
        assert out.shape == (batch_size, expected_cols), f"{label}输出维度不正确"
        print(f"{label}测试通过！")

    def report_params(net):
        """Print every parameter's shape plus the total parameter count."""
        total_params = 0
        print("各层参数:")
        for name, param in net.parameters_and_names():
            total_params += param.size
            print(f"{name}: {param.shape}")
        print(f"总参数量: {total_params}")

    print("=== 网络结构分析 ===")

    # Show each network/dictionary structure.
    print("\n1. DicNN 网络结构:")
    dic_nn = DicNN(layer_sizes=layer_sizes, n_psi_train=n_psi_train)
    print(dic_nn)

    print("\n2. PsiNN 网络结构:")
    psi_nn = PsiNN(dic_trainable=DicNN, layer_sizes=layer_sizes, n_psi_train=n_psi_train)
    print(psi_nn)

    print("\n3. DicRBF 结构:")
    rbf_number = 10
    dic_rbf = DicRBF(rbf_number=rbf_number)
    print(f"RBF中心点数量: {rbf_number}")

    print("\n=== 开始测试网络功能 ===")

    # Forward-pass shape checks for each dictionary.
    print("\n1. 测试 DicNN:")
    batch = fresh_batch()
    check_shape(batch, dic_nn(batch), n_psi_train, "DicNN")

    print("\n2. 测试 PsiNN:")
    batch = fresh_batch()
    check_shape(batch, psi_nn(batch), 1 + input_dim + n_psi_train, "PsiNN")

    print("\n3. 测试 DicRBF:")
    batch = fresh_batch()
    dic_rbf.build(batch)
    check_shape(batch, dic_rbf.construct(batch), 1 + input_dim + rbf_number, "DicRBF")

    # Parameter statistics for the trainable networks.
    print("\n4. 测试网络参数:")
    print("\nDicNN参数统计:")
    report_params(dic_nn)

    print("\nPsiNN参数统计:")
    report_params(psi_nn)

    print("\n所有测试通过！网络结构和输出维度正确。")