import mindspore as ms
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor
import numpy as np

# -----------------------------
# 1. Coupling Flow Layer (ACF)
# -----------------------------
class CouplingFlowLayer(nn.Cell):
    """Affine coupling layer: the first half of the features passes through
    unchanged; the second half undergoes an invertible affine transform whose
    scale and shift are functions of the first half."""

    def __init__(self, dim, hidden_dim=64):
        super().__init__()
        half = dim // 2
        # Scale network s(x1): two Dense layers with a ReLU in between.
        self.s = nn.SequentialCell([
            nn.Dense(half, hidden_dim),
            nn.ReLU(),
            nn.Dense(hidden_dim, half),
        ])
        # Translation network t(x1): identical architecture.
        self.t = nn.SequentialCell([
            nn.Dense(half, hidden_dim),
            nn.ReLU(),
            nn.Dense(hidden_dim, half),
        ])

        # Primitive MindSpore ops reused in construct/inverse.
        self.chunk = ops.Split(axis=1, output_num=2)
        self.concat = ops.Concat(axis=1)
        self.exp = ops.Exp()

    def construct(self, x):
        """Forward map: y1 = x1, y2 = (x2 + t(x1)) * exp(s(x1))."""
        x1, x2 = self.chunk(x)
        scale = self.exp(self.s(x1))
        shift = self.t(x1)
        return self.concat([x1, (x2 + shift) * scale])

    def inverse(self, y):
        """Exact inverse of construct: x2 = y2 * exp(-s(y1)) - t(y1)."""
        y1, y2 = self.chunk(y)
        inv_scale = self.exp(-self.s(y1))
        shift = self.t(y1)
        return self.concat([y1, y2 * inv_scale - shift])


# -----------------------------
# 2. CF-INN construction (stacked layers)
# -----------------------------
class CF_INN(nn.Cell):
    """Coupling-flow invertible network: a stack of CouplingFlowLayer cells
    applied in sequence, with an exact inverse obtained by undoing the
    layers in reverse order."""

    def __init__(self, dim, num_layers=4, hidden_dim=64):
        super().__init__()
        layers = [CouplingFlowLayer(dim, hidden_dim) for _ in range(num_layers)]
        self.flows = nn.CellList(layers)

    def construct(self, x):
        """Apply every coupling layer in order: x -> g(x)."""
        out = x
        for layer in self.flows:
            out = layer(out)
        return out

    def inverse(self, y):
        """Invert the stack by walking the layers back to front: g(x) -> x."""
        out = y
        for idx in range(len(self.flows) - 1, -1, -1):
            out = self.flows[idx].inverse(out)
        return out


# -----------------------------
# 3. Koopman operator estimator
# -----------------------------
class KoopmanOperatorEstimator:
    """Ridge-regularised least-squares estimator of the Koopman operator K
    such that g(x_{t+1}) ≈ g(x_t) @ K in the observable space."""

    def __init__(self, reg=0.0):
        self.reg = reg  # Tikhonov regularisation strength
        self.K = None   # fitted operator, a [d, d] float32 Tensor after fit()

    def fit(self, g_x, g_y):
        """Fit K from paired observables.

        Accepts MindSpore Tensors or numpy arrays of shape [N, d];
        computation happens in numpy. Returns K as a float32 Tensor.
        """
        X = g_x.asnumpy() if isinstance(g_x, Tensor) else g_x
        Y = g_y.asnumpy() if isinstance(g_y, Tensor) else g_y

        d = X.shape[1]
        # Ridge normal equations: K = (X^T X + reg*I)^+ X^T Y.
        gram = X.T @ X + self.reg * np.eye(d, dtype=X.dtype)
        K = np.linalg.pinv(gram) @ X.T @ Y
        self.K = Tensor(K, dtype=ms.float32)
        return self.K

    def predict(self, g0, steps):
        """Roll the fitted operator forward from g0 ([B, d]).

        Returns the latent trajectory stacked on the time axis: [B, steps, d].
        """
        matmul = ops.MatMul()
        seq = [g0]
        while len(seq) < steps:
            seq.append(matmul(seq[-1], self.K))
        return ops.Stack(axis=1)(seq)


# -----------------------------
# 4. FlowDMD model architecture
# -----------------------------
class FlowDMD(nn.Cell):
    """FlowDMD: an invertible coupling-flow encoder (CF-INN) combined with a
    linear Koopman operator K acting in the latent space.

    The model learns g(.) such that g(x_{t+1}) ≈ g(x_t) @ K, i.e. the
    dynamics become linear in the transformed coordinates.
    """

    def __init__(self, dim, num_layers=4, hidden_dim=64, reg=1e-4, device='CPU'):
        super().__init__()
        self.cfinn = CF_INN(dim, num_layers, hidden_dim)
        self.koopman = KoopmanOperatorEstimator(reg=reg)
        # NOTE(review): `device` is stored but never read in this class —
        # presumably kept for API compatibility; confirm before removing.
        self.device = device
        # Initialize K matrix with a hard-coded 2x2 value; it is overwritten
        # whenever compute_loss() refits the Koopman operator.
        K_init = np.array([[ 0.8928, -0.0162],
                          [ 2.3104,  0.9484]], dtype=np.float32)
        self.K = Tensor(K_init, dtype=ms.float32)
        
        # MindSpore operations (Stack defaults to axis=0 -> [T, B, d])
        self.stack = ops.Stack()
        self.matmul = ops.MatMul()

    def construct(self, x):
        """Encode states into the latent observable space: g(x)."""
        return self.cfinn(x)

    def inverse(self, y):
        """Decode latent observables back into state space: g^{-1}(y)."""
        return self.cfinn.inverse(y)

    def compute_loss(self, x_seq, alpha=1.0):
        """Fit K on the encoded sequence and compute the training losses.

        :param x_seq: state sequence, shape [B, T, d]
        :param alpha: weight of the state-reconstruction term
        :return: (total loss, linear latent loss, reconstruction loss)
        """
        # x_seq: [B, T, d]
        B, T, d = x_seq.shape
        
        # Transform each time step through the flow encoder
        g_seq_list = []
        for t in range(T):
            g_t = self.construct(x_seq[:, t])
            g_seq_list.append(g_t)
        g_seq = self.stack(g_seq_list)  # [T, B, d]
        g_seq = ops.Transpose()(g_seq, (1, 0, 2))  # [B, T, d]
        
        # Prepare data for Koopman fitting: pair g(x_t) with g(x_{t+1})
        reshape = ops.Reshape()
        g_x = reshape(g_seq[:, :-1], (-1, d))
        g_y = reshape(g_seq[:, 1:], (-1, d))

        # Fit Koopman operator
        # NOTE(review): fit() converts to numpy, so gradients do NOT flow
        # through K itself — only through g_seq and the inverse transform;
        # confirm this detachment is the intended training scheme.
        self.K = self.koopman.fit(g_x, g_y)  # [d, d]
        
        # Predict the latent sequence by rolling K from the first latent state
        g_pred_seq = self.koopman.predict(g_seq[:, 0], T)  # [B, T, d]
        
        # Inverse transform predictions back to state space
        x_pred_seq_list = []
        for t in range(T):
            x_pred_t = self.inverse(g_pred_seq[:, t])
            x_pred_seq_list.append(x_pred_t)
        x_pred_seq = self.stack(x_pred_seq_list)  # [T, B, d]
        x_pred_seq = ops.Transpose()(x_pred_seq, (1, 0, 2))  # [B, T, d]

        # Compute losses: linearity in latent space + state reconstruction
        mse_loss = nn.MSELoss()
        loss_linear = mse_loss(g_seq, g_pred_seq)
        loss_recon = mse_loss(x_seq, x_pred_seq)
        loss = loss_linear + alpha * loss_recon
        
        return loss, loss_linear, loss_recon

    def predict(self, data_x, traj_len):
        """Roll the model forward `traj_len` steps from start states.

        :param data_x: start states, shape [B, d]
        :param traj_len: trajectory length T
        :return: predicted trajectory, shape [B, T, d]
        """
        self.set_train(False)
        
        # Generate trajectory: encode, advance with K, decode, repeat
        traj = [data_x]
        for _ in range(traj_len - 1):
            g_x = self.construct(data_x)
            g_next = self.matmul(g_x, self.K)
            x_next = self.inverse(g_next)
            traj.append(x_next)
            data_x = x_next  # Update for next time step
        
        # Stack trajectory into [B, T, d] tensor
        traj = self.stack(traj)  # [T, B, d]
        traj = ops.Transpose()(traj, (1, 0, 2))  # [B, T, d]
        
        return traj


# -----------------------------
# 5. Data generation example (fixed-point system)
# -----------------------------
def generate_fixed_point(lambda_=0.9, mu=0.5, T=50):
    """Simulate T steps of the fixed-point benchmark system
    x1' = lambda * x1,  x2' = mu * x2 + (lambda^2 - mu) * x1^2
    from a random initial state in [0.2, 4.2)^2; returns a [T, 2] Tensor."""
    start = np.random.rand(2) * 4 + 0.2
    trajectory = [Tensor(start, dtype=ms.float32)]
    for _ in range(T - 1):
        prev = trajectory[-1]
        nxt1 = lambda_ * prev[0]
        nxt2 = mu * prev[1] + (lambda_ ** 2 - mu) * prev[0] ** 2
        trajectory.append(Tensor([nxt1, nxt2], dtype=ms.float32))
    return ops.Stack()(trajectory)


# -----------------------------
# 6. Main training function
# -----------------------------
def train_flowdmd():
    """Train a FlowDMD model on trajectories of the fixed-point system.

    Each epoch generates one fresh trajectory, reshapes it to a batch of
    size 1, and takes one optimisation step on the combined linear +
    reconstruction loss. Returns the trained model.
    """
    model = FlowDMD(dim=2, num_layers=3, hidden_dim=32, reg=1e-4)
    optimizer = nn.Adam(model.trainable_params(), learning_rate=1e-3)

    # Create training network
    class TrainNet(nn.Cell):
        """Thin Cell wrapper so the training step sees a plain forward."""
        def __init__(self, network):
            super().__init__()
            self.network = network

        def construct(self, x_seq):
            loss, loss_lin, loss_rec = self.network.compute_loss(x_seq, alpha=1.0)
            return loss, loss_lin, loss_rec

    train_net = TrainNet(model)

    # Create training step
    class TrainOneStepCell(nn.Cell):
        """One optimisation step: forward, backward, parameter update."""
        def __init__(self, network, optimizer):
            super().__init__()
            self.network = network
            self.optimizer = optimizer
            # BUG FIX: the original passed sens_param=True, which makes the
            # grad function expect an extra sensitivity tensor that was never
            # supplied at the call site below. get_by_list=True alone matches
            # both the call signature here and the sibling TrainOneStepCell
            # used in train_flowdmd_xy.
            self.grad_fn = ops.GradOperation(get_by_list=True)

        def construct(self, x_seq):
            def forward_fn():
                loss, _, _ = self.network(x_seq)
                return loss

            loss, loss_lin, loss_rec = self.network(x_seq)
            grads = self.grad_fn(forward_fn, self.optimizer.parameters)()
            self.optimizer(grads)
            return loss, loss_lin, loss_rec

    train_step = TrainOneStepCell(train_net, optimizer)
    expand_dims = ops.ExpandDims()  # hoisted: the op object is loop-invariant

    for epoch in range(10000):
        x_seq = generate_fixed_point()
        x_seq = expand_dims(x_seq, 0)  # add batch axis -> [1, T, 2]

        loss, loss_lin, loss_rec = train_step(x_seq)

        if epoch % 100 == 0:
            print(f"Epoch {epoch} | Total: {loss.asnumpy():.6f} | Lin: {loss_lin.asnumpy():.6f} | Rec: {loss_rec.asnumpy():.6f}")

    return model


def train_flowdmd_xy(model, data_xy, epochs=500, lr=1e-3, alpha=1.0, log_interval=50):
    """
    Koopman training on (x, y) pairs.

    :param model: FlowDMD model
    :param data_xy: input in the form [data_x, data_y], i.e. x_t and x_{t+1}
    :param epochs: number of training epochs
    :param lr: learning rate
    :param alpha: weight of the state-reconstruction term
    :param log_interval: print the loss every this many epochs
    :return: the fitted Koopman operator K from the final epoch
    """
    data_x, data_y = data_xy
    assert data_x.shape == data_y.shape, "x 和 y 必须维度一致"
    
    # Convert to tensors if necessary
    if not isinstance(data_x, Tensor):
        data_x = Tensor(data_x, dtype=ms.float32)
    if not isinstance(data_y, Tensor):
        data_y = Tensor(data_y, dtype=ms.float32)

    optimizer = nn.Adam(model.trainable_params(), learning_rate=lr)
    mse_loss = nn.MSELoss()
    matmul = ops.MatMul()
    
    # Create training network
    class TrainNet(nn.Cell):
        """Forward pass: encode the pair, refit K, advance one step,
        decode, and compute the combined loss."""
        def __init__(self, network, loss_fn, alpha):
            super().__init__()
            self.network = network
            self.loss_fn = loss_fn
            self.alpha = alpha
            self.matmul = ops.MatMul()
            
        def construct(self, data_x, data_y):
            # Forward computation
            g_x = self.network(data_x)          # g(x_t)
            g_y = self.network(data_y)          # g(x_{t+1})
            
            # Estimate Koopman operator K
            # NOTE(review): fit() detaches to numpy, so K carries no
            # gradient; learning happens through g_x/g_y and inverse().
            K = self.network.koopman.fit(g_x, g_y)
            self.network.K = K

            # Use K to advance one step
            g_y_pred = self.matmul(g_x, K)           # K g(x_t)
            x_y_pred = self.network.inverse(g_y_pred)  # g^{-1}(K g(x_t)) ≈ x_{t+1}

            # Loss terms
            loss_linear = self.loss_fn(g_y, g_y_pred)
            loss_recon = self.loss_fn(data_y, x_y_pred)
            loss = loss_linear + self.alpha * loss_recon
            
            return loss, loss_linear, loss_recon
    
    train_net = TrainNet(model, mse_loss, alpha)
    
    # Create training step
    class TrainOneStepCell(nn.Cell):
        """One optimisation step over the wrapped TrainNet.

        NOTE(review): the network runs forward twice per step (once for the
        returned loss values, once inside the grad closure) — confirm this
        double evaluation is acceptable.
        """
        def __init__(self, network, optimizer):
            super().__init__()
            self.network = network
            self.optimizer = optimizer
            self.grad_fn = ops.GradOperation(get_by_list=True)
            
        def construct(self, data_x, data_y):
            def forward_fn():
                loss, _, _ = self.network(data_x, data_y)
                return loss
                
            loss, loss_linear, loss_recon = self.network(data_x, data_y)
            grads = self.grad_fn(forward_fn, self.optimizer.parameters)()
            self.optimizer(grads)
            return loss, loss_linear, loss_recon
    
    train_step = TrainOneStepCell(train_net, optimizer)
    
    K = None
    for epoch in range(epochs):
        model.set_train(True)
        
        loss, loss_linear, loss_recon = train_step(data_x, data_y)
        K = model.K  # latest fitted operator (refit every step)

        # Print logs
        if epoch % log_interval == 0:
            print(f"Epoch {epoch} | Total: {loss.asnumpy():.6f} | Linear: {loss_linear.asnumpy():.6f} | Recon: {loss_recon.asnumpy():.6f}")
    
    return K


def predict_flowdmd(model, data_x, traj_len):
    """Roll a trained FlowDMD model forward from a batch of start states.

    :param model: trained FlowDMD model instance
    :param data_x: start states, [B, d] (Tensor or array-like)
    :param traj_len: number of steps T to predict
    :return: predicted trajectory, [B, T, d]
    """
    model.set_train(False)

    if not isinstance(data_x, Tensor):
        data_x = Tensor(data_x, dtype=ms.float32)

    matmul = ops.MatMul()
    state = data_x
    states = [state]

    # One step: encode, advance linearly with K, decode back to state space.
    for _ in range(traj_len - 1):
        latent = model(state)
        latent_next = matmul(latent, model.K)
        state = model.inverse(latent_next)
        states.append(state)

    # Stack along a new time axis to get [B, T, d].
    return ops.Stack(axis=1)(states)


if __name__ == '__main__':
    # Set context: PyNative (eager) mode on CPU, matching the numpy
    # conversions done inside KoopmanOperatorEstimator.fit during training.
    ms.set_context(mode=ms.PYNATIVE_MODE, device_target='CPU')
    model = train_flowdmd() 