import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score, mean_squared_error

# Device selection: use the GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Neural network architecture
class PINN(nn.Module):
    """Fully connected surrogate u_theta(x, t) with two tanh hidden layers."""

    def __init__(self, input_dim, hidden_dim, output_dim):
        """Create the three linear layers: input -> hidden -> hidden -> output.

        Layer creation order (fc1, fc2, fc3) is kept so parameter
        initialization consumes the RNG stream identically.
        """
        super(PINN, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        """Map a batch of (x, t) rows to predicted solution values."""
        hidden = torch.tanh(self.fc1(x))
        hidden = torch.tanh(self.fc2(hidden))
        return self.fc3(hidden)

# Derivative computation

def compute_derivatives(u, x, t):
    """Return (u_t, u_xx): first time derivative and second space derivative.

    All grads are taken with create_graph=True so the results remain
    differentiable (needed when they feed into a training loss).
    Using grad_outputs=ones is equivalent to differentiating u.sum().
    """
    ones = torch.ones_like(u)
    du_dx = torch.autograd.grad(u, x, grad_outputs=ones, create_graph=True)[0]
    du_dt = torch.autograd.grad(u, t, grad_outputs=ones, create_graph=True)[0]
    d2u_dx2 = torch.autograd.grad(
        du_dx, x, grad_outputs=torch.ones_like(du_dx), create_graph=True)[0]
    return du_dt, d2u_dx2

# Loss function

def compute_loss(model, alpha, x_f, t_f, x_ic, t_ic, u_ic, x_bc, t_bc, x_obs, t_obs, u_obs):
    """Total PINN loss for the 1-D heat equation u_t = alpha * u_xx.

    Sums four mean-squared terms with unit weights:
      * PDE residual at collocation points (x_f, t_f)
      * initial-condition misfit u(x, 0) vs u_ic
      * homogeneous Dirichlet boundary (u = 0) at (x_bc, t_bc)
      * data misfit at observation points (x_obs, t_obs)

    Returns:
        (total_loss, pde_loss, ic_loss, bc_loss, obs_loss) as scalar tensors.
    """
    # Differentiate w.r.t. detached leaf copies rather than calling
    # requires_grad_() in place: the in-place form mutated the caller's
    # tensors and caused loss.backward() to accumulate unused .grad
    # buffers on them at every epoch. Loss values are unchanged.
    x_f = x_f.detach().requires_grad_(True)
    t_f = t_f.detach().requires_grad_(True)
    u_f = model(torch.cat([x_f, t_f], dim=1))
    u_t, u_xx = compute_derivatives(u_f, x_f, t_f)
    pde_residual = u_t - alpha * u_xx
    pde_loss = torch.mean(pde_residual ** 2)

    # Initial-condition points: match the prescribed profile at t = 0.
    u_ic_pred = model(torch.cat([x_ic, t_ic], dim=1))
    ic_loss = torch.mean((u_ic_pred - u_ic) ** 2)

    # Boundary-condition points: solution vanishes on the spatial boundary.
    u_bc_pred = model(torch.cat([x_bc, t_bc], dim=1))
    bc_loss = torch.mean(u_bc_pred ** 2)

    # Observation points: fit the (possibly noisy) measurements.
    u_obs_pred = model(torch.cat([x_obs, t_obs], dim=1))
    obs_loss = torch.mean((u_obs_pred - u_obs) ** 2)

    total_loss = pde_loss + ic_loss + bc_loss + obs_loss
    return total_loss, pde_loss, ic_loss, bc_loss, obs_loss

# Training loop

def train(model, alpha, x_f, t_f, x_ic, t_ic, u_ic, x_bc, t_bc, x_obs, t_obs, u_obs, epochs, lr):
    """Jointly optimize the network weights and the diffusivity alpha.

    Runs Adam for `epochs` steps over the full batch of collocation,
    initial, boundary and observation points, logging every loss
    component plus the current alpha estimate each epoch.

    Returns:
        Six parallel lists: total/pde/ic/bc/obs loss histories and the
        alpha trajectory.
    """
    optimizer = optim.Adam([*model.parameters(), alpha], lr=lr)
    histories = tuple([] for _ in range(6))
    loss_hist, pde_hist, ic_hist, bc_hist, obs_hist, alpha_hist = histories
    for epoch in range(epochs):
        optimizer.zero_grad()
        losses = compute_loss(
            model, alpha, x_f, t_f, x_ic, t_ic, u_ic, x_bc, t_bc, x_obs, t_obs, u_obs)
        total = losses[0]
        total.backward()
        optimizer.step()
        # Record every scalar component for later plotting.
        for hist, value in zip(histories[:5], losses):
            hist.append(value.item())
        alpha_hist.append(alpha.item())
        if epoch % 100 == 0:
            print(f"Epoch {epoch}, Loss: {total.item():.6f}, alpha: {alpha.item():.4f}")
    return loss_hist, pde_hist, ic_hist, bc_hist, obs_hist, alpha_hist

# Visualization of losses and alpha

def plot_loss_alpha(loss_hist, pde_hist, ic_hist, bc_hist, obs_hist, alpha_hist):
    """Show loss components (log scale) and the alpha trajectory side by side."""
    curves = [
        (loss_hist, 'Total Loss'),
        (pde_hist, 'PDE Loss'),
        (ic_hist, 'IC Loss'),
        (bc_hist, 'BC Loss'),
        (obs_hist, 'Obs Loss'),
    ]
    plt.figure(figsize=(12,5))
    plt.subplot(1,2,1)
    for series, label in curves:
        plt.plot(series, label=label)
    plt.yscale('log')
    plt.legend()
    plt.title('Loss Curve')
    plt.subplot(1,2,2)
    plt.plot(alpha_hist)
    plt.title('alpha Estimate')
    plt.xlabel('Epoch')
    plt.ylabel('alpha')
    plt.tight_layout()
    plt.show()

# Evaluation

def evaluate(model, alpha, alpha_true=None):
    """Compare the PINN prediction with the closed-form solution
    u(x, t) = sin(pi x) * exp(-alpha pi^2 t) on the unit square, printing
    R2/RMSE and plotting both fields.

    Args:
        model: trained network mapping (x, t) rows to u values.
        alpha: learned diffusivity (tensor/parameter; only .item() is read).
        alpha_true: optional ground-truth diffusivity for the reference
            field. Defaults to the *learned* alpha (original behavior),
            in which case the metrics measure self-consistency with the
            recovered parameter rather than recovery error — pass the
            true value to measure actual accuracy.
    """
    ref_alpha = alpha.item() if alpha_true is None else float(alpha_true)
    grid_n = 100
    x_test = torch.linspace(0, 1, grid_n, device=device).view(-1, 1)
    t_test = torch.linspace(0, 1, grid_n, device=device).view(-1, 1)
    # 'ij' indexing: rows of X/T vary over x, columns over t.
    X, T = torch.meshgrid(x_test.squeeze(), t_test.squeeze(), indexing='ij')
    x_flat = X.reshape(-1, 1)
    t_flat = T.reshape(-1, 1)
    input_test = torch.cat([x_flat, t_flat], dim=1)
    with torch.no_grad():
        u_pred = model(input_test).cpu().numpy().reshape(grid_n, grid_n)
    u_true = np.sin(np.pi * X.cpu().numpy()) * np.exp(-ref_alpha * np.pi**2 * T.cpu().numpy())
    r2 = r2_score(u_true.flatten(), u_pred.flatten())
    rmse = np.sqrt(mean_squared_error(u_true.flatten(), u_pred.flatten()))
    print(f"R2: {r2:.4f}, RMSE: {rmse:.6f}")
    plt.figure(figsize=(12,5))
    plt.subplot(1,2,1)
    plt.imshow(u_true, extent=[0,1,0,1], origin='lower', aspect='auto')
    plt.title('Analytical Solution')
    plt.colorbar()
    plt.subplot(1,2,2)
    plt.imshow(u_pred, extent=[0,1,0,1], origin='lower', aspect='auto')
    plt.title('PINN Prediction')
    plt.colorbar()
    plt.tight_layout()
    plt.show()

if __name__ == "__main__":
    # Ground-truth diffusivity used only to synthesize the observations.
    alpha_true = 0.15
    # Network and training hyperparameters.
    input_dim = 2
    hidden_dim = 20
    output_dim = 1
    epochs = 2000
    lr = 0.001

    # Collocation points for the PDE residual, uniform in (0,1) x (0,1).
    N_f = 1000
    x_f = torch.rand(N_f, 1, device=device)
    t_f = torch.rand(N_f, 1, device=device)

    # Initial-condition points (t = 0) with profile u(x, 0) = sin(pi x).
    N_ic = 100
    x_ic = torch.linspace(0, 1, N_ic, device=device).view(-1, 1)
    t_ic = torch.zeros_like(x_ic, device=device)
    u_ic = torch.sin(np.pi * x_ic)

    # Boundary-condition points on x = 0 and x = 1 (u = 0 enforced there).
    N_bc = 100
    t_bc = torch.linspace(0, 1, N_bc, device=device).view(-1, 1)
    x_bc_0 = torch.zeros_like(t_bc, device=device)
    x_bc_1 = torch.ones_like(t_bc, device=device)
    x_bc = torch.cat([x_bc_0, x_bc_1], dim=0)
    t_bc = torch.cat([t_bc, t_bc], dim=0)

    # Noisy observations of the analytical solution
    # u(x, t) = sin(pi x) * exp(-alpha_true * pi^2 * t).
    N_obs = 200
    x_obs = torch.rand(N_obs, 1, device=device)
    t_obs = torch.rand(N_obs, 1, device=device)
    u_obs = torch.sin(np.pi * x_obs) * torch.exp(-alpha_true * np.pi**2 * t_obs)
    noise = 0.01 * torch.randn_like(u_obs, device=device)
    u_obs = u_obs + noise

    # Model and the trainable diffusivity, initialized away from alpha_true.
    # NOTE(review): requires_grad=True is redundant inside nn.Parameter.
    model = PINN(input_dim, hidden_dim, output_dim).to(device)
    alpha = torch.nn.Parameter(torch.tensor(0.1, dtype=torch.float32, requires_grad=True, device=device))

    # Train, report the recovered alpha, then visualize losses and fields.
    loss_hist, pde_hist, ic_hist, bc_hist, obs_hist, alpha_hist = train(
        model, alpha, x_f, t_f, x_ic, t_ic, u_ic, x_bc, t_bc, x_obs, t_obs, u_obs, epochs, lr)
    print(f"反演得到的alpha: {alpha.item():.4f}, 真实alpha: {alpha_true}")
    plot_loss_alpha(loss_hist, pde_hist, ic_hist, bc_hist, obs_hist, alpha_hist)
    evaluate(model, alpha) 