import mindspore as ms
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor
from mindspore import Parameter
from mindspore.common.initializer import Normal, XavierUniform
import numpy as np
# Local module import; prefer the package-relative form.
try:
    from .dictionary import DicRBF, PsiNN
except ImportError:
    # Fall back to a plain import when run as a standalone script.
    from dictionary import DicRBF, PsiNN

# Configure the MindSpore execution context (PyNative mode on GPU).
# NOTE(review): an earlier comment claimed float64 parity with the TensorFlow
# version, but the code below uses float32 throughout — confirm intended dtype.
ms.set_context(mode=ms.PYNATIVE_MODE, device_target='GPU')


class KoopmanGeneralSolver(object):
    '''
    Build the Koopman solver.

    Computes the Koopman operator K on a fixed dictionary by (regularized)
    least squares, then exposes its eigen-decomposition, estimated
    eigenfunctions, Koopman modes, and trajectory prediction.
    '''

    def __init__(self, dic, target_dim, reg=0.0):
        """Initializer

        :param dic: dictionary; must provide ``construct`` (maps states to
            dictionary features) and ``generate_B`` (builds the B matrix)
        :type dic: class
        :param target_dim: dimension of the variable of the equation
        :type target_dim: int
        :param reg: the regularization parameter when computing K, defaults to 0.0
        :type reg: float, optional
        """
        self.dic = dic
        self.dic_func = dic.construct
        self.target_dim = target_dim
        self.reg = reg

    def separate_data(self, data):
        """Split an ``[x, y]`` data pair into its two components."""
        data_x = data[0]
        data_y = data[1]
        return data_x, data_y

    def build(self, data_train):
        """Fit the solver on training data with the fixed dictionary.

        :param data_train: ``[data_x, data_y]`` snapshot/successor pair
        """
        # Separate data
        self.data_train = data_train
        self.data_x_train, self.data_y_train = self.separate_data(
            self.data_train)

        # Compute final information (no extra regularization at this stage)
        self.compute_final_info(reg_final=0.0)

    def compute_final_info(self, reg_final):
        """Compute K, its eigen-decomposition, and the Koopman modes.

        :param reg_final: regularization used for this final K computation
        """
        self.K = self.compute_K(self.dic_func,
                                self.data_x_train,
                                self.data_y_train,
                                reg=reg_final)
        self.eig_decomp(self.K)
        self.compute_mode()

    def eig_decomp(self, K):
        """Eigen-decompose K and cache eigenvalues/eigenvectors.

        Eigenvalues are sorted by descending real part; the inverse of the
        eigenvector matrix is precomputed for mode extraction.
        """
        # NumPy is used because MindSpore lacks a general (complex)
        # eigen-decomposition op.
        if isinstance(K, Tensor):
            K_np = K.asnumpy()
        else:
            K_np = K
        self.eigenvalues, self.eigenvectors = np.linalg.eig(K_np)
        idx = self.eigenvalues.real.argsort()[::-1]
        self.eigenvalues = self.eigenvalues[idx]
        self.eigenvectors = self.eigenvectors[:, idx]
        self.eigenvectors_inv = np.linalg.inv(self.eigenvectors)

    def eigenfunctions(self, data_x):
        """Estimated Koopman eigenfunctions evaluated at ``data_x``.

        Returns ``Psi(x) @ V`` as a complex64 tensor, where V holds the
        eigenvectors of K.
        """
        psi_x = self.dic_func(data_x)
        # Eigenvectors are generally complex, so cast features to complex64.
        psi_x_complex = ops.Cast()(psi_x, ms.complex64)
        eigenvectors_tensor = Tensor(self.eigenvectors, dtype=ms.complex64)
        matmul = ops.MatMul()
        val = matmul(psi_x_complex, eigenvectors_tensor)
        return val

    def compute_mode(self):
        """Compute the Koopman modes ``(V^{-1} B)^T`` and cache them."""
        self.basis_func_number = self.K.shape[0]

        # Form B matrix (maps dictionary features back to the state space)
        self.B = self.dic.generate_B(self.data_x_train)
        # Normalize B to NumPy first: Tensor(...) construction from an
        # existing Tensor with a dtype change is not reliable.
        if isinstance(self.B, Tensor):
            B_np = self.B.asnumpy()
        else:
            B_np = np.asarray(self.B)

        # Compute modes
        eigenvectors_inv_tensor = Tensor(self.eigenvectors_inv, dtype=ms.complex64)
        B_tensor = Tensor(B_np, dtype=ms.complex64)
        matmul = ops.MatMul()
        transpose = ops.Transpose()
        modes_tensor = matmul(eigenvectors_inv_tensor, B_tensor)
        self.modes = transpose(modes_tensor, (1, 0))
        return self.modes

    def calc_psi_next(self, data_x, K):
        """One-step prediction in dictionary space: ``Psi(x) @ K``."""
        psi_x = self.dic_func(data_x)
        matmul = ops.MatMul()
        psi_next = matmul(psi_x, K)
        return psi_next

    def predict(self, x0, traj_len):
        """Predict a trajectory of ``traj_len`` states starting from ``x0``.

        Each step evaluates the eigenfunctions at the current state, scales
        them by the eigenvalues, and maps back to state space via the modes.

        :param x0: initial state(s); assumed shape (batch, target_dim) —
            TODO confirm against callers
        :param traj_len: number of time steps, including ``x0``
        :return: predicted trajectory (squeezed, [batch, time, dim] layout)
        """
        # Ensure x0 is a proper MindSpore tensor
        if not isinstance(x0, Tensor):
            x0 = Tensor(x0, dtype=ms.float32)

        traj = [x0]
        for _ in range(traj_len - 1):
            x_curr = traj[-1]
            efunc = self.eigenfunctions(x_curr)

            # Advance one step in eigenfunction coordinates:
            # phi_{t+1} = Lambda * phi_t
            eigenvalues_tensor = Tensor(self.eigenvalues, dtype=ms.complex64)
            efunc_scaled = eigenvalues_tensor * efunc

            # Map back to state space via the modes
            transpose = ops.Transpose()
            matmul = ops.MatMul()
            x_next = matmul(self.modes, transpose(efunc_scaled, (1, 0)))

            # The state is real; discard the imaginary part and restore the
            # (batch, dim) orientation
            real = ops.Real()
            x_next_real = real(x_next)
            x_next_t = transpose(x_next_real, (1, 0))

            # Ensure x_next_t is a proper tensor before appending
            if not isinstance(x_next_t, Tensor):
                x_next_t = Tensor(x_next_t, dtype=ms.float32)

            traj.append(x_next_t)

        # Stack trajectory along a new leading time axis
        stack = ops.Stack(axis=0)
        traj_tensor = stack(traj)

        # Transpose to get [batch, time, dim] format
        transpose = ops.Transpose()
        traj_permuted = transpose(traj_tensor, (1, 0, 2))

        # Squeeze to remove unnecessary dimensions
        squeeze = ops.Squeeze()
        return squeeze(traj_permuted)

    def compute_K(self, dic, data_x, data_y, reg):
        """Compute K by regularized least squares.

        Solves ``(Psi_x^T Psi_x + reg * I) K = Psi_x^T Psi_y`` using a
        pseudo-inverse, so a singular Gram matrix is handled gracefully.

        :param dic: dictionary function mapping states to features
        :param data_x: snapshot states (NumPy array or Tensor)
        :param data_y: successor states (NumPy array or Tensor)
        :param reg: Tikhonov regularization weight
        :return: K as a float32 tensor
        """
        # ``build`` passes raw NumPy arrays; the dictionary is an nn.Cell and
        # expects tensors, so convert explicitly (matches every other call
        # site, which wraps inputs in Tensor before calling dic_func).
        if not isinstance(data_x, Tensor):
            data_x = Tensor(np.asarray(data_x), dtype=ms.float32)
        if not isinstance(data_y, Tensor):
            data_y = Tensor(np.asarray(data_y), dtype=ms.float32)

        psi_x = dic(data_x)
        psi_y = dic(data_y)

        # Transpose operations
        transpose = ops.Transpose()
        psi_xt = transpose(psi_x, (1, 0))

        # Identity matrix for Tikhonov regularization
        eye = ops.Eye()
        idmat = eye(psi_x.shape[-1], psi_x.shape[-1], ms.float32)

        # Matrix multiplications
        matmul = ops.MatMul()
        xtx = matmul(psi_xt, psi_x)

        # Regularization
        reg_tensor = Tensor(reg, dtype=ms.float32)
        xtx_reg = reg_tensor * idmat + xtx

        # Pseudo-inverse via NumPy (MindSpore has no direct pinv op)
        xtx_reg_np = xtx_reg.asnumpy()
        xtx_inv_np = np.linalg.pinv(xtx_reg_np)
        xtx_inv = Tensor(xtx_inv_np, dtype=ms.float32)

        # Final computation
        xty = matmul(psi_xt, psi_y)
        self.K_reg = matmul(xtx_inv, xty)
        return self.K_reg


class KoopmanDLSolver(KoopmanGeneralSolver):
    '''
    Build the Koopman model with dictionary learning.

    Alternates between (a) solving for K by least squares given the current
    dictionary and (b) training the dictionary network to minimize
    ``||Psi(y) - K Psi(x)||^2``.
    '''

    def build_model(self):
        """Build a model wrapping the trainable dictionary and a linear K layer.

        The loss function is ||Psi(y) - K Psi(x)||^2, so the model outputs the
        residual ``K Psi(x) - Psi(y)``.

        Requires ``self.data_x_train`` to be set (``build`` does this before
        calling here), because the layer width is probed from the dictionary.

        :return: the residual model
        :rtype: nn.Cell
        """
        # BUGFIX: the linear K layer acts in dictionary-feature space, so its
        # width must equal the dictionary output dimension — not target_dim.
        # (With target_dim, loading the basis x basis K matrix into the layer
        # below would fail whenever the dictionary has extra features.)
        sample_x = self.data_x_train
        if not isinstance(sample_x, Tensor):
            sample_x = Tensor(sample_x, dtype=ms.float32)
        psi_dim = self.dic_func(sample_x).shape[-1]

        class KoopmanModel(nn.Cell):
            """Residual network: construct(x, y) -> K Psi(x) - Psi(y)."""

            def __init__(self, dic_func, k_dim):
                super(KoopmanModel, self).__init__()
                self.dic_func = dic_func
                self.K = nn.Dense(k_dim, k_dim, has_bias=False,
                                  weight_init='xavier_uniform')

            def construct(self, x, y):
                psi_x = self.dic_func(x)
                psi_y = self.dic_func(y)
                psi_next = self.K(psi_x)
                return psi_next - psi_y

        return KoopmanModel(self.dic_func, psi_dim)

    def train_psi(self, model, epochs):
        """Train the trainable part of the dictionary

        :param model: koopman model
        :type model: model
        :param epochs: the number of training epochs before computing K for each inner training epoch
        :type epochs: int
        :return: history
        :rtype: history callback object
        """
        # Set model to training mode
        model.set_train(True)

        # A fresh optimizer each call picks up the current self.lr, so the
        # outer-loop learning-rate decay takes effect on the next call.
        optimizer = nn.Adam(model.trainable_params(), learning_rate=self.lr)

        # Create loss function
        criterion = nn.MSELoss()

        # Create training network
        class TrainOneStepCell(nn.Cell):
            """One forward/backward/update step for the residual model."""

            def __init__(self, network, optimizer, loss_fn):
                super(TrainOneStepCell, self).__init__()
                self.network = network
                self.optimizer = optimizer
                self.loss_fn = loss_fn
                # BUGFIX: no sens_param here — with sens_param=True the grad
                # function would require an explicit sensitivity argument,
                # which the call below never supplies.
                self.grad = ops.GradOperation(get_by_list=True)

            def construct(self, x, y, target):
                def forward_fn():
                    outputs = self.network(x, y)
                    loss = self.loss_fn(outputs, target)
                    return loss

                loss = forward_fn()
                grads = self.grad(forward_fn, self.optimizer.parameters)()
                self.optimizer(grads)
                return loss

        # Convert data to tensors; the target is all-zeros because the model
        # already outputs the residual K Psi(x) - Psi(y).
        data_x_tensor = Tensor(self.data_x_train, dtype=ms.float32)
        data_y_tensor = Tensor(self.data_y_train, dtype=ms.float32)
        zeros = ops.Zeros()
        target_tensor = zeros(self.dic_func(data_y_tensor).shape, ms.float32)

        train_net = TrainOneStepCell(model, optimizer, criterion)

        loss_values = []
        for epoch in range(epochs):
            loss = train_net(data_x_tensor, data_y_tensor, target_tensor)
            loss_value = float(loss.asnumpy())
            loss_values.append(loss_value)

            if epoch % 10 == 0:
                print(f'Epoch {epoch}, Loss: {loss_value:.6f}')

        return {'loss': loss_values}

    def build(
            self,
            data_train,
            data_valid,
            epochs,
            batch_size,
            lr,
            log_interval,
            lr_decay_factor):
        """Run the alternating optimization of K and the dictionary network.

        :param data_train: ``[data_x, data_y]`` training pair
        :param data_valid: ``[data_x, data_y]`` validation pair
        :param epochs: number of outer-loop iterations
        :param batch_size: stored on the instance; training below is
            full-batch — TODO confirm intended use
        :param lr: initial learning rate
        :param log_interval: outer-loop interval for logging / lr decay checks
        :param lr_decay_factor: multiplicative decay applied when loss rises
        """
        # Separate training data
        self.data_train = data_train
        self.data_x_train, self.data_y_train = self.separate_data(
            self.data_train)

        self.data_valid = data_valid

        # Zero targets for the residual model (train and validation)
        zeros = ops.Zeros()
        self.zeros_data_y_train = zeros(
            self.dic_func(Tensor(self.data_y_train, dtype=ms.float32)).shape,
            ms.float32)
        self.zeros_data_y_valid = zeros(
            self.dic_func(Tensor(self.data_valid[1], dtype=ms.float32)).shape,
            ms.float32)

        self.batch_size = batch_size
        self.lr = lr

        # Build the Koopman DL model
        self.model = self.build_model()

        # Alternating training loop
        losses = []
        for i in range(epochs):
            # One step for computing K
            self.K = self.compute_K(self.dic_func,
                                    self.data_x_train,
                                    self.data_y_train,
                                    self.reg)

            # Load K into the linear layer; nn.Dense stores its weight as
            # (out, in), hence the transpose.
            K_transposed = ops.Transpose()(self.K, (1, 0))
            self.model.K.weight.set_data(K_transposed)

            # Two steps for training PsiNN
            self.history = self.train_psi(self.model, epochs=2)

            print('number of the outer loop:', i)
            if i % log_interval == 0:
                losses.append(self.history['loss'][-1])

                # Decay the learning rate when the logged loss increased; the
                # new rate is picked up by the fresh optimizer created inside
                # the next train_psi call.
                if len(losses) > 2:
                    if losses[-1] > losses[-2]:
                        print("Error increased. Decay learning rate")
                        self.lr *= lr_decay_factor

        # Compute final information with a small final regularization
        self.compute_final_info(reg_final=0.01)


## Test / demo entry point
def test_dl_solver():
    """Test Koopman dictionary learning solver"""
    # Random training pair (float32 snapshots and successors).
    train_x = np.random.uniform(size=(50, 2)).astype(np.float32)
    train_y = np.random.uniform(size=(50, 2)).astype(np.float32)
    train_pair = [train_x, train_y]

    # Random validation pair with the same state dimension.
    valid_x = np.random.uniform(size=(20, 2)).astype(np.float32)
    valid_y = np.random.uniform(size=(20, 2)).astype(np.float32)
    valid_pair = [valid_x, valid_y]

    # Trainable dictionary and the dictionary-learning solver.
    dictionary = PsiNN(layer_sizes=[100, 100, 100], n_psi_train=22)
    solver = KoopmanDLSolver(dic=dictionary,
                             target_dim=2,
                             reg=0.1)

    solver.build(data_train=train_pair,
                 data_valid=valid_pair,
                 epochs=20,
                 batch_size=10,
                 lr=1e-3,
                 log_interval=5,
                 lr_decay_factor=0.8)

    # Report the shape/dtype of the resulting Koopman modes.
    print(f"Final modes shape: {solver.modes.shape}")
    print(f"Modes dtype: {solver.modes.dtype}")

# Run the smoke test only when this file is executed as a script.
if __name__ == "__main__":
    test_dl_solver()