# X-TFC
import deepxde as dde
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import grad
import matplotlib.pyplot as plt
import time

# Configure DeepXDE for double-precision floats and the PyTorch backend.
# NOTE(review): set_default_backend persists the backend choice to DeepXDE's
# config file; it may only take effect on the next import — confirm.
dde.config.set_default_float('float64')
dde.backend.set_default_backend("pytorch")


# Chebyshev neural-network model: linear readout over a 2-D tensor-product basis.
class ChebyshevNN(nn.Module):
    def __init__(self, degree=2):
        """degree: highest Chebyshev polynomial order used per variable."""
        super().__init__()
        self.degree = degree
        # Bias-free linear map from the (degree+1)^2 product features to one output.
        self.output_layer = nn.Linear((degree + 1) ** 2, 1, bias=False)

    def chebyshev_features(self, x):
        """Tensor-product Chebyshev basis evaluated at x.

        x is a (B, 2) tensor; the result is (B, (degree+1)^2), where feature
        (i, j) is T_i(x1) * T_j(x2).
        """
        def basis(t):
            # Recurrence T_0 = 1, T_1 = t, T_n = 2 t T_{n-1} - T_{n-2}.
            polys = [torch.ones_like(t)]
            if self.degree >= 1:
                polys.append(t)
            for _ in range(self.degree - 1):
                polys.append(2 * t * polys[-1] - polys[-2])
            return torch.stack(polys, dim=1)

        basis_x1 = basis(x[:, 0])
        basis_x2 = basis(x[:, 1])
        # Per-sample outer product of the two 1-D bases, flattened row-major.
        return torch.einsum('bi,bj->bij', basis_x1, basis_x2).view(
            -1, (self.degree + 1) ** 2)

    def forward(self, x):
        """Linear combination of the Chebyshev product features."""
        return self.output_layer(self.chebyshev_features(x))


# Constrained expression V = g(x) - g(0), enforcing the condition V(0) = 0 exactly.
def constraint_expression(x, model):
    """Return V(x) = g(x) - g(0) so that V vanishes at the origin.

    x: (N, d) tensor of states; model: callable mapping (N, d) -> (N, 1).
    """
    g_x = model(x)  # raw network output at x
    # Evaluate g at a single origin row and broadcast the subtraction:
    # identical values/gradients to model(zeros_like(x)), but one row of
    # work instead of N duplicated rows.
    origin = torch.zeros(1, x.shape[1], dtype=x.dtype, device=x.device)
    g_0 = model(origin)
    return g_x - g_0  # (N, 1) - (1, 1) broadcast; satisfies V(0) = 0


# Pointwise residual of the HJB equation.
def hjb_residual(x, model):
    """Evaluate the HJB residual at collocation points x ((N, 2) tensor).

    The value function V is the constrained network output; its first-order
    partials are obtained via autograd.
    """
    x.requires_grad_(True)  # enable gradient tracking through the inputs
    V = constraint_expression(x, model)

    # dV/dx: first-order partial derivatives of V w.r.t. both state variables.
    dV = grad(V, x, grad_outputs=torch.ones_like(V),
              create_graph=True, retain_graph=True)[0]
    dV1, dV2 = dV[:, 0:1], dV[:, 1:2]
    s1, s2 = x[:, 0:1], x[:, 1:2]

    # Residual terms (problem-specific HJB form):
    cost = s1 ** 2 + s2 ** 2                                   # running cost
    drift1 = (-s1 + s2) * dV1                                  # drift in x1
    drift2 = (-0.5 * s1 - 0.5 * s2 + 0.5 * s1 ** 2 * s2) * dV2  # drift in x2
    penalty = 0.25 * (s1 ** 2) * dV2 ** 2                      # control penalty
    return cost + drift1 + drift2 - penalty


# Uniformly spaced training points on [-1, 1]^2.
def generate_data(num_points):
    """Return a (side^2, 2) array of grid points, side = floor(sqrt(num_points))."""
    side = int(np.sqrt(num_points))        # points per axis
    axis = np.linspace(-1, 1, side)        # equally spaced samples on [-1, 1]
    X, Y = np.meshgrid(axis, axis)         # 2-D grid coordinates
    # Flatten the grids and pair them up as (x, y) rows.
    return np.column_stack((X.ravel(), Y.ravel()))


# Training parameters
num_points = 4900 # total number of training points (70x70 grid)
degree = 2 # Chebyshev polynomial degree
epochs = 50 # number of training rounds

# Initialize the model
model = ChebyshevNN(degree=degree) # create the model instance
train_data = torch.tensor(generate_data(num_points), dtype=torch.float64) # collocation points


# Loss function (mean squared residual)
def loss_fn(model):
    residuals = hjb_residual(train_data, model) # residual at every collocation point
    return torch.mean(residuals ** 2) # mean squared error


# LBFGS optimizer (quasi-Newton method)
optimizer = torch.optim.LBFGS(
    model.output_layer.parameters(), # optimize only the output-layer weights
    lr=1, # learning rate
    max_iter=1000, # max iterations per optimizer step
    line_search_fn='strong_wolfe' # line-search method
)

# Training loop
start_time = time.time()  # start timing
for epoch in range(epochs):
    def closure():
        optimizer.zero_grad() # clear accumulated gradients
        loss = loss_fn(model) # compute the loss
        loss.backward() # backpropagate
        print(f"Epoch {epoch}, Loss: {loss.item()}") # progress report
        return loss


    optimizer.step(closure) # one LBFGS step (may invoke closure many times)

end_time = time.time()  # record training end time
total_time = end_time - start_time
print(f"\nTotal training time: {total_time:.2f} seconds")

# Analytic solution of the benchmark problem.
def exact_solution(x):
    """Return (V, u): the known value function and optimal control at points x."""
    s1 = x[:, 0]
    s2 = x[:, 1]
    value = 0.5 * s1 ** 2 + s2 ** 2  # closed-form value function V
    control = -(s1 * s2)             # closed-form optimal control u
    return value, control


# Test-error evaluation
test_points = generate_data(100) # 10x10 grid
test_tensor = torch.tensor(test_points, dtype=torch.float64)

# Predicted V values
V_pred = constraint_expression(test_tensor, model).detach().numpy()
V_true, u_true = exact_solution(test_points) # ground-truth values

# Predicted control u (requires dV/dx, hence gradient tracking)
test_tensor.requires_grad = True
V_test = constraint_expression(test_tensor, model)
V_x = grad(V_test, test_tensor, grad_outputs=torch.ones_like(V_test))[0] # gradient of V
u_pred = (-0.5 * test_tensor[:, 0] * V_x[:, 1]).detach().numpy() # u from the optimality condition


#-------- Visualization ---------
# Reshape the flat point list back into grid form
X = test_points[:, 0].reshape(10, 10) # x-coordinate grid
Y = test_points[:, 1].reshape(10, 10) # y-coordinate grid

# Reshape predictions and ground truth into 2-D grids
V_pred_2d = V_pred.reshape(10, 10)
V_true_2d = V_true.reshape(10, 10)
u_pred_2d = u_pred.reshape(10, 10)
u_true_2d = u_true.reshape(10, 10)

# Figure with two 3-D subplots for V
fig = plt.figure(figsize=(12, 7))

# Plot V_pred
ax1 = fig.add_subplot(1, 2, 1, projection='3d')
surf1 = ax1.plot_surface(X, Y, V_pred_2d, cmap='autumn', edgecolor='none') # autumn colormap
ax1.set_xlabel('x1')
ax1.set_ylabel('x2')
ax1.set_zlabel('V')
ax1.set_title('X-TFC V')
fig.colorbar(surf1, ax=ax1, shrink=0.5, aspect=5)

# Plot V_true
ax2 = fig.add_subplot(1, 2, 2, projection='3d')
surf2 = ax2.plot_surface(X, Y, V_true_2d, cmap='winter', edgecolor='none') # winter colormap
ax2.set_xlabel('x1')
ax2.set_ylabel('x2')
ax2.set_zlabel('V')
ax2.set_title('exact V')
fig.colorbar(surf2, ax=ax2, shrink=0.5, aspect=5)

plt.tight_layout()  # auto-adjust subplot spacing
plt.show()

fig = plt.figure(figsize=(12, 7))

# Plot u_pred
ax3 = fig.add_subplot(1, 2, 1, projection='3d')
surf3 = ax3.plot_surface(X, Y, u_pred_2d, cmap='summer', edgecolor='none')  # summer colormap
ax3.set_xlabel('x1')
ax3.set_ylabel('x2')
ax3.set_zlabel('u')
ax3.set_title('X-TFC u')
fig.colorbar(surf3, ax=ax3, shrink=0.5, aspect=5)

# Plot u_true
ax4 = fig.add_subplot(1, 2, 2, projection='3d')
surf4 = ax4.plot_surface(X, Y, u_true_2d, cmap='spring', edgecolor='none')  # spring colormap
ax4.set_xlabel('x1')
ax4.set_ylabel('x2')
ax4.set_zlabel('u')
ax4.set_title('exact u')
fig.colorbar(surf4, ax=ax4, shrink=0.5, aspect=5)

plt.tight_layout()  # auto-adjust subplot spacing
plt.show()

# 2-D visualizations
# Side-by-side 2-D comparison for V
plt.figure(figsize=(12, 5))

# Predicted V heatmap
plt.subplot(1, 2, 1)
contour = plt.contourf(X, Y, V_pred_2d, levels=20, cmap='viridis')
plt.xlabel('x1')
plt.ylabel('x2')
plt.title('X-TFC V')
plt.colorbar(contour, shrink=0.5, aspect=5)
plt.scatter(X, Y, c='white', s=10, alpha=0.3)  # overlay the sample points

# Exact V heatmap
plt.subplot(1, 2, 2)
contour = plt.contourf(X, Y, V_true_2d, levels=20, cmap='plasma')
plt.xlabel('x1')
plt.ylabel('x2')
plt.title('Exact V')
plt.colorbar(contour, shrink=0.5, aspect=5)
plt.scatter(X, Y, c='white', s=10, alpha=0.3)

plt.tight_layout()
plt.show()

# Side-by-side 2-D comparison for u
plt.figure(figsize=(12, 5))

# Predicted u heatmap
plt.subplot(1, 2, 1)
contour = plt.contourf(X, Y, u_pred_2d, levels=20, cmap='RdYlBu') # alternative: coolwarm
plt.xlabel('x1')
plt.ylabel('x2')
plt.title('X-TFC u')
plt.colorbar(contour, shrink=0.5, aspect=5)
plt.scatter(X, Y, c='black', s=10, alpha=0.3)

# Exact u heatmap
plt.subplot(1, 2, 2)
contour = plt.contourf(X, Y, u_true_2d, levels=20, cmap='RdYlBu')
plt.xlabel('x1')
plt.ylabel('x2')
plt.title('Exact u')
plt.colorbar(contour, shrink=0.5, aspect=5)
plt.scatter(X, Y, c='black', s=10, alpha=0.3)

plt.tight_layout()
plt.show()

# Error-distribution visualization
plt.figure(figsize=(12, 5))

# Absolute error in V
plt.subplot(1, 2, 1)
error_V = np.abs(V_pred_2d - V_true_2d)
contour = plt.contourf(X, Y, error_V, levels=20, cmap='hot')
plt.xlabel('x1')
plt.ylabel('x2')
plt.title('V Absolute error ')
plt.colorbar(contour, shrink=0.5, aspect=5)

# Absolute error in u
plt.subplot(1, 2, 2)
error_u = np.abs(u_pred_2d - u_true_2d)
contour = plt.contourf(X, Y, error_u, levels=20, cmap='Oranges')
plt.xlabel('x1')
plt.ylabel('x2')
plt.title('u Absolute error ')
plt.colorbar(contour, shrink=0.5, aspect=5)

plt.tight_layout()
plt.show()

# Print error summary (max/mean absolute error of V and u)
print("\n==== 误差分析 ====")
print(f"V最大绝对误差: {np.max(np.abs(V_pred_2d - V_true_2d)):.2e}")
print(f"V平均绝对误差: {np.mean(np.abs(V_pred_2d - V_true_2d)):.2e}")
print(f"u最大绝对误差: {np.max(np.abs(u_pred_2d - u_true_2d)):.2e}")
print(f"u平均绝对误差: {np.mean(np.abs(u_pred_2d - u_true_2d)):.2e}")