import torch
import torch.nn as nn
import torch.nn.functional as F

# -----------------------------
# 1. Coupling Flow Layer (ACF)
# -----------------------------


class CouplingFlowLayer(nn.Module):
    """Affine coupling layer (ACF).

    The input is split in half along the feature axis: the first half passes
    through unchanged and conditions a scale network ``s`` and a shift
    network ``t`` that transform the second half. Because the first half is
    untouched, the transform has an exact analytic inverse.
    """

    def __init__(self, dim, hidden_dim=64):
        super().__init__()
        half = dim // 2
        # Build s first, then t, so parameter initialization consumes the
        # RNG in the same order as the reference implementation.
        self.s = self._make_mlp(half, hidden_dim)
        self.t = self._make_mlp(half, hidden_dim)

    @staticmethod
    def _make_mlp(half, hidden_dim):
        # Two-layer ReLU MLP mapping half -> half features.
        return nn.Sequential(
            nn.Linear(half, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, half),
        )

    def forward(self, x):
        """Forward transform: y1 = x1, y2 = (x2 + t(x1)) * exp(s(x1))."""
        left, right = torch.chunk(x, 2, dim=1)
        scale = self.s(left)
        shift = self.t(left)
        return torch.cat([left, (right + shift) * torch.exp(scale)], dim=1)

    def inverse(self, y):
        """Exact inverse of forward: x2 = y2 * exp(-s(y1)) - t(y1)."""
        left, right = torch.chunk(y, 2, dim=1)
        scale = self.s(left)
        shift = self.t(left)
        return torch.cat([left, right * torch.exp(-scale) - shift], dim=1)


# -----------------------------
# 2. CF-INN construction (stacked coupling layers)
# -----------------------------
class CF_INN(nn.Module):
    """Invertible network built by composing several coupling flow layers.

    ``inverse`` undoes ``forward`` exactly by applying each layer's inverse
    in reverse order.
    """

    def __init__(self, dim, num_layers=4, hidden_dim=64):
        super().__init__()
        layers = [CouplingFlowLayer(dim, hidden_dim) for _ in range(num_layers)]
        self.flows = nn.ModuleList(layers)

    def forward(self, x):
        """Apply all coupling layers front to back."""
        out = x
        for layer in self.flows:
            out = layer(out)
        return out

    def inverse(self, y):
        """Apply each layer's inverse back to front."""
        out = y
        for layer in reversed(self.flows):
            out = layer.inverse(out)
        return out


# -----------------------------
# 3. Koopman operator estimator
# -----------------------------
class KoopmanOperatorEstimator:
    """Ridge-regularized least-squares estimator of the Koopman matrix K.

    Fits K such that ``g(x_{t+1}) ≈ g(x_t) @ K`` from snapshot matrices,
    then rolls the resulting linear dynamics forward.
    """

    def __init__(self, reg=0.0):
        # Tikhonov regularization strength added to X^T X before inversion.
        self.reg = reg

    def fit(self, g_x, g_y):
        """Solve K = pinv(X^T X + reg*I) X^T Y and cache it on self.K."""
        n_features = g_x.shape[1]
        identity = torch.eye(n_features, dtype=g_x.dtype, device=g_x.device)
        gram = g_x.T @ g_x + self.reg * identity
        # pinv tolerates a rank-deficient Gram matrix (e.g. reg == 0).
        self.K = torch.linalg.pinv(gram) @ g_x.T @ g_y
        return self.K

    def predict(self, g0, steps):
        """Roll g0 [B, d] forward under K; returns [B, steps, d]."""
        trajectory = [g0]
        while len(trajectory) < steps:
            trajectory.append(trajectory[-1] @ self.K)
        return torch.stack(trajectory, dim=1)


# -----------------------------
# 4. FlowDMD model architecture
# -----------------------------
class FlowDMD(nn.Module):
    """FlowDMD: learns Koopman observables as a weighted ensemble of
    invertible CF-INN "dictionaries".

    forward() maps states to observables g(x) via a learnable weighted sum of
    the dictionaries; compute_loss() fits a Koopman matrix K on consecutive
    latent pairs and penalizes both latent linearity and state reconstruction.
    """

    def __init__(self, dim, num_layers=4, hidden_dim=64, reg=1e-4, device='cpu', num_dictionaries=22):
        # dim: state dimension (must be even for the coupling-layer split)
        # reg: ridge regularization passed to the Koopman estimator
        # num_dictionaries: number of parallel CF-INN observable dictionaries
        super().__init__()
        self.dim = dim
        self.num_dictionaries = num_dictionaries
        self.dictionaries = nn.ModuleList([
            CF_INN(dim, num_layers, hidden_dim) for _ in range(num_dictionaries)
        ])
        self.koopman = KoopmanOperatorEstimator(reg=reg)
        self.device = device
        # NOTE(review): hard-coded 2x2 initial K looks like a leftover from a
        # specific experiment; it is overwritten by compute_loss()/training,
        # but predict() would use these values on an untrained model — confirm.
        self.K = torch.tensor([[3.3914, -3.2373],
                               [1.9170, -1.5767]])
        # Mixing weights over dictionaries, initialized uniform; sum-to-one
        # and non-negativity are only softly enforced via loss terms.
        self.dictionary_weights = nn.Parameter(
            torch.ones(num_dictionaries) / num_dictionaries)

    def forward(self, x):
        """Map states x [B, dim] to observables g(x) [B, dim]."""
        outputs = []
        for dic in self.dictionaries:
            outputs.append(dic(x))
        # Weighted combination of all dictionary outputs
        weighted_output = torch.zeros_like(outputs[0])
        for i, output in enumerate(outputs):
            weighted_output += self.dictionary_weights[i] * output
        return weighted_output

    def inverse(self, y):
        """Approximate inverse map g^{-1}(y) [B, dim].

        NOTE(review): this is the weighted sum of each dictionary's exact
        inverse, which is NOT the exact inverse of forward() unless
        num_dictionaries == 1 — confirm this approximation is intended.
        """
        # Apply the inverse transform of each dictionary
        outputs = []
        for dic in self.dictionaries:
            outputs.append(dic.inverse(y))
        # Weighted combination of all inverse-transformed outputs
        weighted_output = torch.zeros_like(outputs[0])
        for i, output in enumerate(outputs):
            weighted_output += self.dictionary_weights[i] * output
        return weighted_output

    def compute_loss(self, x_seq, alpha=1.0):
        """Fit K on latent pairs from x_seq [B, T, d]; return
        (total, linearity, reconstruction) losses.

        Side effect: refits and stores self.K on every call.
        """
        B, T, d = x_seq.shape
        g_seq = torch.stack([self.forward(x_seq[:, t])
                            for t in range(T)], dim=1)
        g_x = g_seq[:, :-1].reshape(-1, d)
        g_y = g_seq[:, 1:].reshape(-1, d)

        self.K = self.koopman.fit(g_x, g_y)
        g_pred_seq = self.koopman.predict(g_seq[:, 0], T)
        x_pred_seq = torch.stack(
            [self.inverse(g_pred_seq[:, t]) for t in range(T)], dim=1)

        loss_linear = F.mse_loss(g_seq, g_pred_seq)
        loss_recon = F.mse_loss(x_seq, x_pred_seq)

        # Dictionary-weight regularization: encourage the weights to sum to 1
        # and to remain non-negative
        weight_reg = F.mse_loss(
            torch.sum(self.dictionary_weights), torch.tensor(1.0))
        weight_pos = torch.mean(F.relu(-self.dictionary_weights))

        loss = loss_linear + alpha * loss_recon + 0.1 * weight_reg + 0.1 * weight_pos
        return loss, loss_linear, loss_recon

    def predict(self, data_x, traj_len):
        """Roll out traj_len steps from data_x [B, d] via
        x_{t+1} = g^{-1}(g(x_t) @ K); returns [B, traj_len, d]."""
        self.eval()
        data_x = data_x.to(self.device)

        traj = [data_x]
        for _ in range(traj_len - 1):
            g_x = self(data_x)
            g_next = self.inverse(g_x @ self.K)
            traj.append(g_next)
            data_x = g_next

        traj = torch.stack(traj, dim=1)
        return traj


# -----------------------------
# 5. Example data generation (fixed-point system)
# -----------------------------
def generate_fixed_point(lambda_=0.9, mu=0.5, T=50, x0=None):
    """Simulate the classic 2-D fixed-point benchmark system.

        x1' = lambda * x1
        x2' = mu * x2 + (lambda^2 - mu) * x1^2

    :param lambda_: eigenvalue governing the first coordinate
    :param mu: eigenvalue governing the second coordinate
    :param T: trajectory length (number of states, >= 1)
    :param x0: optional initial state tensor of shape (2,); when None a
        random start in [0.2, 4.2)^2 is drawn (backward-compatible default)
    :return: trajectory tensor of shape [T, 2]
    """
    if x0 is None:
        x0 = torch.rand(2) * 4 + 0.2
    x_seq = [x0]
    for _ in range(T - 1):
        x = x_seq[-1]
        x1 = lambda_ * x[0]
        x2 = mu * x[1] + (lambda_**2 - mu) * x[0]**2
        x_seq.append(torch.tensor([x1, x2]))
    return torch.stack(x_seq)


# -----------------------------
# 6. Main training functions
# -----------------------------
def train_flowdmd(epochs=10000, lr=1e-3, alpha=1.0, log_interval=100):
    """Train a FlowDMD model on freshly sampled fixed-point trajectories.

    Previously hard-coded hyperparameters are exposed as defaulted keyword
    arguments (backward compatible: `train_flowdmd()` behaves as before).

    :param epochs: number of optimization steps (one trajectory per step)
    :param lr: Adam learning rate
    :param alpha: weight of the state-reconstruction loss term
    :param log_interval: print losses every this many epochs
    :return: the trained FlowDMD model
    """
    model = FlowDMD(dim=2, num_layers=3, hidden_dim=32, reg=1e-4)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    for epoch in range(epochs):
        # A new random trajectory each epoch acts as on-the-fly data augmentation.
        x_seq = generate_fixed_point().unsqueeze(0)  # [1, T, 2]
        loss, loss_lin, loss_rec = model.compute_loss(x_seq, alpha=alpha)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if epoch % log_interval == 0:
            print(
                f"Epoch {epoch} | Total: {loss.item():.6f} | Lin: {loss_lin.item():.6f} | Rec: {loss_rec.item():.6f}")
    return model


def train_flowdmd_xy(model, data_xy, epochs=500, lr=1e-3, alpha=1.0, log_interval=50):
    """
    Koopman training loop on (x_t, x_{t+1}) sample pairs; supports the
    multi-dictionary FlowDMD model.

    :param model: FlowDMD model
    :param data_xy: [data_x, data_y], the states x_t and successors x_{t+1}
    :param epochs: number of training epochs
    :param lr: learning rate
    :param alpha: weight of the state-reconstruction loss term
    :param log_interval: print losses every this many epochs
    :return: the last fitted Koopman matrix K (0 if epochs == 0)
    """
    data_x, data_y = data_xy
    assert data_x.shape == data_y.shape, "x 和 y 必须维度一致"

    # Bug fix: move the data to the model's device so training does not crash
    # for models constructed with device != 'cpu'.
    data_x = data_x.to(model.device)
    data_y = data_y.to(model.device)

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    K = 0
    for epoch in range(epochs):
        model.train()

        # Forward pass: lift both ends of each pair into observable space.
        g_x = model(data_x)          # g(x_t)
        g_y = model(data_y)          # g(x_{t+1})

        # Estimate the Koopman operator K by regularized least squares.
        K = model.koopman.fit(g_x, g_y)
        model.K = K

        # Advance one step with K and map back to state space.
        g_y_pred = g_x @ K                  # K g(x_t)
        x_y_pred = model.inverse(g_y_pred)  # g^{-1}(K g(x_t)) ≈ x_{t+1}

        # Loss terms: latent linearity + state reconstruction.
        loss_linear = F.mse_loss(g_y, g_y_pred)
        loss_recon = F.mse_loss(data_y, x_y_pred)

        # Dictionary-weight regularization: weights should sum to 1 and stay
        # non-negative. Bug fix: build the target on the weights' own
        # device/dtype instead of a bare CPU torch.tensor(1.0), which raised
        # a device-mismatch error on non-CPU models.
        weights = model.dictionary_weights
        one = torch.ones((), dtype=weights.dtype, device=weights.device)
        weight_reg = F.mse_loss(torch.sum(weights), one)
        weight_pos = torch.mean(F.relu(-weights))

        loss = loss_linear + alpha * loss_recon + 0.1 * weight_reg + 0.1 * weight_pos

        # Optimization step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Logging
        if epoch % log_interval == 0:
            print(
                f"Epoch {epoch} | Total: {loss.item():.6f} | Linear: {loss_linear.item():.6f} | Recon: {loss_recon.item():.6f}")
    return K


def predict_flowdmd(model, data_x, traj_len):
    """
    Roll out a trajectory with a trained FlowDMD model.

    Each step lifts the state, advances linearly with the fitted Koopman
    matrix, and maps back: x_{t+1} = g^{-1}(g(x_t) @ model.K).

    :param model: trained FlowDMD model instance
    :param data_x: initial states [B, d]
    :param traj_len: trajectory length T (>= 1)
    :return: predicted trajectory [B, T, d]
    """
    model.eval()
    data_x = data_x.to(model.device)

    # Bug fix: inference must run under no_grad — the original rollout built
    # an ever-growing autograd graph over the whole trajectory.
    with torch.no_grad():
        traj = [data_x]
        for _ in range(traj_len - 1):
            g_x = model(data_x)
            g_next = model.inverse(g_x @ model.K)
            traj.append(g_next)
            data_x = g_next  # advance to the next state

        # Stack the per-step states into a [B, T, d] tensor
        return torch.stack(traj, dim=1)


if __name__ == '__main__':
    # Entry point: train FlowDMD on the synthetic fixed-point system.
    train_flowdmd()
