import numpy as np
import torch
print("PyTorch version:", torch.__version__)
import torch.nn as nn
from model import FNN
from util import *
from train10 import *
from torch.autograd import Variable,grad
import time
import pyvista as pv
import argparse
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from torch.distributions import Normal
import matplotlib
import umap
from torchsummary import summary
from fvcore.nn import FlopCountAnalysis

matplotlib.use('Agg')  # non-interactive Agg backend: figures are saved to file, never displayed
plt.rcParams['font.family'] = 'Times New Roman'
plt.rcParams['font.size'] = 18
torch.manual_seed(0)  # fixed seed for reproducible weight initialization / sampling
def output_transform(X):
    """Map the raw network output to a physical temperature.

    Applies ``T_range * softplus(X) + T_ref`` so the predicted temperature is
    always above the ambient reference. Relies on the module-level globals
    ``T_range`` and ``T_ref``.
    """
    return T_range * nn.Softplus()(X) + T_ref

def input_transform(X):
    """Normalize network inputs to [-1, 1].

    Uses the module-level domain bounds ``X_min`` / ``X_max`` for min-max
    scaling of the (x, y, z, t) coordinates.
    """
    return 2. * (X - X_min) / (X_max - X_min) - 1.
import pandas as pd
from scipy.interpolate import interp1d
import numpy as np
import numpy as np

# Tabulated T_field / beta_field data (temperature in K)
T_field = np.array([1800.0, 1780.0, 1760.0, 1740.0, 1720.0, 1700.0, 1680.0, 1660.0, 1640.0, 1620.0, 1600.0, 1580.0, 1564.94, 1560.0, 1540.0])

beta_field = np.array([0.06228374122618696, 0.06010593423543086, 0.057829648503558054, 0.05558496841859181, 0.053373038951334655, 0.051195039075772525, 0.04905203265602734, 0.04694505596603666, 0.04487528670809849, 0.04284388941561078, 0.04085190084120591, 0.038931100344746804, 0.043702024268439874, 0.017187365534940335, 0.006011457154398098])

# Dynamic rule: beta = 0 below the lowest tabulated temperature (1540 K).
# (An earlier comment mentioned 1254.69; the code consistently uses 1540.)
beta_field_dynamic = np.where(T_field >= 1540, beta_field, 0)

beta_interp_linear = interp1d(T_field, beta_field_dynamic, kind='linear', fill_value="extrapolate")
# Sanity-check the interpolant at a few temperature points
T_test = np.array([1540, 1550, 1560, 1600, 1700, 1750, 1800])  # test temperature points
# Test interpolation: arbitrary temperature points
beta_test_linear = beta_interp_linear(T_test)

# Ensure all values are non-negative (set negative values to zero)
beta_test_linear = np.maximum(beta_test_linear, 0)


# Print the interpolated values
for t, b in zip(T_test, beta_test_linear):
    print(f"T = {t:.2f} k -> β(T) = {b:.6f}")
# Read temperature-dependent material properties from an Excel file
file_path = "/home/mber_codes/ysl/TransformerPINN/PINN2/src/K-T.xlsx"  # replace with your own file path
data = pd.read_excel(file_path)

# Extract the property columns
temperature = data.iloc[:, 0].values  # column 0: temperature
thermal_conductivity = data.iloc[:, 1].values  # column 1: thermal conductivity
rho = data.iloc[:, 2].values  # column 2: density
specific_heat = data.iloc[:, 3].values  # column 3: specific heat

# Cubic interpolants for each property, extrapolated outside the table
k_function = interp1d(temperature, thermal_conductivity, kind='cubic', fill_value="extrapolate")  # k(T)
rho_function = interp1d(temperature, rho, kind='cubic', fill_value="extrapolate")  # rho(T)
Cp_function = interp1d(temperature, specific_heat, kind='cubic', fill_value="extrapolate")  # Cp(T)
def PDE(x, y, z, t, net):
    """Residual of the transient heat-transfer PDE at collocation points.

    Evaluates rho*Cp*dT/dt - k*laplacian(T) + convection for the temperature
    predicted by ``net``. Material properties k, rho, Cp and beta are looked
    up from the temperature-dependent interpolants built at module level and
    treated as constants w.r.t. autograd (detached).

    Args:
        x, y, z, t: coordinate tensors with requires_grad=True
            (assumed shape (N, 1) — TODO confirm against callers).
        net: network mapping concatenated (x, y, z, t) to temperature T.

    Returns:
        Tensor of PDE residual values (zero for a perfect solution).
    """
    # 1. Concatenate the inputs into a single tensor
    X = torch.cat([x, y, z, t], dim=-1)
    T = net(X)  # predicted temperature field
    
    # 2. First/second derivatives in time and space via autograd
    T_t = grad(T, t, create_graph=True, grad_outputs=torch.ones_like(T))[0]  # dT/dt
    T_x = grad(T, x, create_graph=True, grad_outputs=torch.ones_like(T))[0]  # dT/dx
    T_xx = grad(T_x, x, create_graph=True, grad_outputs=torch.ones_like(T_x))[0]  # d2T/dx2
    T_y = grad(T, y, create_graph=True, grad_outputs=torch.ones_like(T))[0]  # dT/dy
    T_yy = grad(T_y, y, create_graph=True, grad_outputs=torch.ones_like(T_y))[0]  # d2T/dy2
    T_z = grad(T, z, create_graph=True, grad_outputs=torch.ones_like(T))[0]  # dT/dz
    T_zz = grad(T_z, z, create_graph=True, grad_outputs=torch.ones_like(T_z))[0]  # d2T/dz2
    # 3. Temperature-dependent material properties (via SciPy interpolation)
    T_numpy = T.detach().cpu().numpy()  # PyTorch tensor -> NumPy for interpolation
    k_numpy = k_function(T_numpy)  # thermal conductivity k(T)
    rho_numpy = rho_function(T_numpy)
    Cp_numpy = Cp_function(T_numpy)
    k_tensor = torch.tensor(k_numpy, requires_grad=False, device=T.device)  # back to a torch tensor
    rho_tensor = torch.tensor(rho_numpy, requires_grad=False, device=T.device)
    Cp_tensor = torch.tensor(Cp_numpy, requires_grad=False, device=T.device)
    
    # 4. Dynamic beta(T)
    # Interpolate, then zero out beta below 1540 (the interpolant's lowest knot)
    beta_numpy = beta_interp_linear(T_numpy)
    beta_numpy[T_numpy < 1540] = 0  # beta = 0 below 1540 (an older comment said 1254.69)
    beta_tensor = torch.tensor(beta_numpy, requires_grad=False, device=T.device)  # back to a torch tensor

    # 5. Heat-conduction term
    conduction = k_tensor * (T_xx + T_yy + T_zz)  # k * laplacian(T)
    
    # 6. Approximate internal-flow term
    grad_T_squared = T_x**2 + T_y**2 + T_z**2  # |grad T|^2
    # NOTE(review): internal_flow (and hence beta) is computed but NOT included
    # in the residual below — confirm whether this omission is intentional.
    internal_flow = -rho_tensor * Cp_tensor * beta_tensor * grad_T_squared  # internal-flow term (dynamic beta)
    convection =rho_tensor * Cp_tensor *U* (T_x+T_y+T_z)
    # 7. PDE residual (conduction + convection only; internal_flow omitted)
    f = rho_tensor * Cp_tensor * T_t - conduction  + convection
    return f
import umap
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import umap
import matplotlib.pyplot as plt
import numpy as np
import umap
import torch
import numpy as np
import umap
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import umap
import matplotlib.pyplot as plt
import numpy as np
import umap
import torch
import numpy as np
def sampling_uniform(density, x_range, y_range, z_range=None, t_range=None, sample_type='domain', t=None):
    """Draw uniform random points over a 2D, 3D, or 4D box.

    Dimensionality follows which ranges are supplied: (x, y) only gives 2D
    points, adding ``z_range`` gives 3D, adding ``t_range`` as well gives 4D.

    Args:
        density: number of points to draw (cast to int).
        x_range, y_range, z_range, t_range: per-axis [lo, hi] bounds.
        sample_type: label used by callers; not consulted here.
        t: unused, kept for interface compatibility.

    Returns:
        (points, None) where ``points`` has shape (density, ndim).
    """
    count = int(density)

    def draw(bounds):
        return np.random.uniform(bounds[0], bounds[1], count)

    if z_range is None and t_range is None:
        columns = [draw(x_range), draw(y_range)]
    elif t_range is None:
        columns = [draw(x_range), draw(y_range), draw(z_range)]
    else:
        columns = [draw(x_range), draw(y_range), draw(z_range), draw(t_range)]
    return np.stack(columns, axis=-1), None
def generate_points(p=None, f=None):
    """Sample PINN training point sets and pair them with condition flags.

    Draws boundary points for the six box faces, initial-condition points at
    t = 0, interior (domain) collocation points, and densely sampled
    melt-pool points; converts each set to a float tensor on the global
    ``device`` and appends it to ``p`` with a matching flag in ``f``.

    Args:
        p: list to extend with point tensors; a fresh list is created when None.
        f: list to extend with condition flags; a fresh list is created when None.

    Returns:
        (p, f): the extended lists.

    Note: depends on the module-level globals x_min, x_max, T_ref and device.
    """
    # Fix: the original signature used mutable default arguments (p=[], f=[]),
    # which are shared and keep growing across calls; create fresh lists instead.
    if p is None:
        p = []
    if f is None:
        f = []

    t = np.linspace(x_min[3] + 0.01, x_max[3], 121)

    xr = [x_min[0], x_max[0]]
    yr = [x_min[1], x_max[1]]
    zr = [x_min[2], x_max[2]]
    tr = [t.min(), t.max()]

    # One (sparse) sample set per boundary face.
    bound_x_neg, _ = sampling_uniform(2., xr, yr, zr, tr, sample_type='-x')
    bound_x_pos, _ = sampling_uniform(2., xr, yr, zr, tr, sample_type='+x')
    bound_y_neg, _ = sampling_uniform(2., xr, yr, zr, tr, sample_type='-y')
    bound_y_pos, _ = sampling_uniform(2., xr, yr, zr, tr, sample_type='+y')
    bound_z_neg, _ = sampling_uniform(2., xr, yr, zr, tr, sample_type='-z')
    bound_z_pos, _ = sampling_uniform(2., xr, yr, zr, tr, sample_type='+z')

    # Denser sampling in the melt-pool region (currently the full box).
    melt_pool_pts, _ = sampling_uniform(8., xr, yr, zr, [x_min[3], x_max[3]], 'melt_pool')

    # Interior collocation points and t = 0 initial-condition points.
    domain_pts, _ = sampling_uniform(4., xr, yr, zr, [x_min[3], x_max[3]], 'domain')
    init_pts, _ = sampling_uniform(4., xr, yr, zr, [0, 0], 'domain')

    p.extend([
        torch.tensor(bound_x_neg, requires_grad=True, dtype=torch.float).to(device),
        torch.tensor(bound_x_pos, requires_grad=True, dtype=torch.float).to(device),
        torch.tensor(bound_y_neg, requires_grad=True, dtype=torch.float).to(device),
        torch.tensor(bound_y_pos, requires_grad=True, dtype=torch.float).to(device),
        torch.tensor(bound_z_neg, requires_grad=True, dtype=torch.float).to(device),
        torch.tensor(bound_z_pos, requires_grad=True, dtype=torch.float).to(device),
        torch.tensor(init_pts, requires_grad=True, dtype=torch.float).to(device),
        torch.tensor(domain_pts, requires_grad=True, dtype=torch.float).to(device),
        torch.tensor(melt_pool_pts, requires_grad=True, dtype=torch.float).to(device)
    ])

    f.extend([
        ['BC', '-x'], ['BC', '+x'], ['BC', '-y'], ['BC', '+y'],
        ['BC', '-z'], ['BC', '+z'], ['IC', T_ref], ['domain'], ['melt_pool']
    ])

    return p, f
def compute_sigma(T):
    """Temperature-dependent measurement-noise std: sigma(T) = max(2, 0.02 * T)."""
    relative = 0.02 * T
    return np.maximum(relative, 2)
def load_data(p, f, filename, num):
    """Append measured data points (coordinates + temperature) to the training sets.

    Loads an array from ``filename``; columns 0-3 are (x, y, z, t) and column
    4 is temperature. When ``num`` is non-zero, the rows are shuffled in place
    and only the first ``num`` are kept.

    Args:
        p: list of point tensors to append to.
        f: list of condition flags to append to.
        filename: path of the .npy file to load.
        num: number of rows to keep; 0 keeps all rows.

    Returns:
        (p, f): the extended lists.

    Note: uses the module-level global ``device``.
    """
    samples = np.load(filename)
    if num != 0:
        np.random.shuffle(samples)
        samples = samples[:num]
    coords = torch.tensor(samples[:, 0:4], requires_grad=True, dtype=torch.float).to(device)
    target = torch.tensor(samples[:, 4:5], requires_grad=True, dtype=torch.float).to(device)
    p.append(coords)
    f.append(['data', target])
    return p, f

def BC(x, y, z, t, net, loc):
    """Boundary-condition residual on one face of the domain box.

    Lateral faces use convection + radiation (Robin) conditions; the +z face
    additionally receives a Gaussian moving laser heat flux; the -z face
    returns dT/dt instead of a flux expression.

    Args:
        x, y, z, t: boundary coordinate tensors with requires_grad=True.
        net: temperature network.
        loc: face identifier, one of '-x', '+x', '-y', '+y', '-z', '+z'.

    Returns:
        Residual tensor for the given face (None for an unknown ``loc``).

    Note: uses the module-level globals k_function, h, T_ref, Rboltz, emiss,
    P, eta, r, v and t_end.
    """
    X = torch.concat([x, y, z, t], axis=-1)
    T = net(X)
    T_numpy = T.detach().cpu().numpy()  # PyTorch tensor -> NumPy for interpolation
    k_numpy = k_function(T_numpy)       # thermal conductivity k(T)
    k_tensor = torch.tensor(k_numpy, requires_grad=False, device=T.device)  # back to a torch tensor
    if loc == '-x':
        T_x = grad(T, x, create_graph=True, grad_outputs=torch.ones_like(T))[0]
        return k_tensor * T_x - h * (T - T_ref) - Rboltz * emiss * (T**4 - T_ref**4)
    if loc == '+x':
        T_x = grad(T, x, create_graph=True, grad_outputs=torch.ones_like(T))[0]
        return -k_tensor * T_x - h * (T - T_ref) - Rboltz * emiss * (T**4 - T_ref**4)
    if loc == '-y':
        T_y = grad(T, y, create_graph=True, grad_outputs=torch.ones_like(T))[0]
        return k_tensor * T_y - h * (T - T_ref) - Rboltz * emiss * (T**4 - T_ref**4)
    if loc == '+y':
        T_y = grad(T, y, create_graph=True, grad_outputs=torch.ones_like(T))[0]
        return -k_tensor * T_y - h * (T - T_ref) - Rboltz * emiss * (T**4 - T_ref**4)
    if loc == '-z':
        # NOTE(review): -z enforces dT/dt rather than a flux condition — confirm intent.
        T_t = grad(T, t, create_graph=True, grad_outputs=torch.ones_like(T))[0]
        return T_t
    if loc == '+z':
        T_z = grad(T, z, create_graph=True, grad_outputs=torch.ones_like(T))[0]
        # Gaussian moving heat source, active for 0 < t <= t_end. The constants
        # 8 and 1 match the default toolpath origin (--x0, --y0) — presumably
        # they should come from args; verify.
        q = 2 * P * eta / torch.pi / r**2 * torch.exp(-2 * (torch.square(x - 8 - v * t) + torch.square(y - 1)) / r**2) * (t <= t_end) * (t > 0)
        return -k_tensor * T_z - h * (T - T_ref) - Rboltz * emiss * (T**4 - T_ref**4) + q
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', type=str, default='cuda:1', help='GPU name')
    parser.add_argument('--output', type=str, default='bareplate', help='output filename')
    parser.add_argument('--T_ref', type=float, default=273.1, help='ambient temperature')
    parser.add_argument('--T_range', type=float, default=3000., help='temperature range')
    parser.add_argument('--xmax', type=float, default=115, help='max x')
    parser.add_argument('--xmin', type=float, default=0., help='min x')
    parser.add_argument('--ymax', type=float, default=2, help='max y')
    parser.add_argument('--ymin', type=float, default=0., help='min y')
    parser.add_argument('--zmax', type=float, default=25., help='max z')
    parser.add_argument('--zmin', type=float, default=0., help='min z')
    parser.add_argument('--tmax', type=float, default=47, help='max t')
    parser.add_argument('--tmin', type=float, default=0., help='min t')
    parser.add_argument('--x0', type=float, default=8, help='toolpath origin x')
    parser.add_argument('--y0', type=float, default=1, help='toolpath origin y')
    # Fixed copy-paste typo in the help text: this is the z origin, not y.
    parser.add_argument('--z0', type=float, default=2, help='toolpath origin z')
    parser.add_argument('--r', type=float, default=1, help='beam radius')
    parser.add_argument('--v', type=float, default=14, help='scan speed')
    parser.add_argument('--t_end', type=float, default=6, help='laser stop time')
    parser.add_argument('--h', type=float, default=13, help='convection coefficient')
    parser.add_argument('--eta', type=float, default=.8, help='absorptivity')
    parser.add_argument('--P', type=float, default=600, help='laser power')
    parser.add_argument('--emiss', type=float, default=.8, help='emissivity')
    parser.add_argument('--lr', type=float, default=2e-4, help='learning rate')
    parser.add_argument('--iters', type=int, default=50000, help='number of iters')
    parser.add_argument('--data', type=str, default='.../train.npy', help='filename, default:None')
    parser.add_argument('--data_num', type=int, default=0, help='number of training data used, 0 for all data')
    parser.add_argument('--calib_eta', type=bool, default=False, help='calibrate eta')
    parser.add_argument('--calib_material', type=bool, default=False, help='calibrate cp and k')
    parser.add_argument('--valid', type=str, default='.../valid.npy', help='validation data file')
    parser.add_argument('--pretrain', type=str, default='None', help='pretrained model file')
    parser.add_argument('--T_inf', type=float, default=273.1, help='ambient temperature in Kelvin')
    parser.add_argument('--U', type=float, default=0.1, help='convection velocity')
    args = parser.parse_args()

    # Device and the module-level globals used by PDE/BC/generate_points.
    # Fix: honor --device instead of the previously hard-coded "cuda:1"
    # (the default is still 'cuda:1', so behavior is unchanged by default).
    device = torch.device(args.device)
    U = args.U

    x_max = np.array([args.xmax, args.ymax, args.zmax, args.tmax])
    x_min = np.array([args.xmin, args.ymin, args.zmin, args.tmin])
    X_max = torch.tensor(x_max, dtype=torch.float).to(device)
    X_min = torch.tensor(x_min, dtype=torch.float).to(device)

    r = args.r
    v = args.v
    t_end = args.t_end
    P = args.P
    eta = args.eta

    T_ref = args.T_ref
    T_range = args.T_range
    T_inf = args.T_inf
    h = args.h
    Rboltz = 5.6704e-14  # Stefan-Boltzmann constant (scaled units — TODO confirm unit system)
    emiss = args.emiss

    # Validation data: columns 0-3 are (x, y, z, t), column 4 is temperature.
    data = np.load(args.valid)
    test_in = torch.tensor(data[:, 0:4], requires_grad=False, dtype=torch.float).to(device)
    test_out = data[:, 4:5]

    # Add Gaussian measurement noise with temperature-dependent sigma.
    noise = np.random.normal(0, compute_sigma(test_out), size=test_out.shape)
    test_out_noisy = test_out + noise

    # Convert the noisy targets to a tensor.
    test_out = torch.tensor(test_out_noisy, requires_grad=False, dtype=torch.float).to(device)
    lr = args.lr

    # (Removed an unused `RandomForestRegressor` import that was here.)
    from skopt import gp_minimize
    from skopt.space import Real, Integer
    from functools import partial


# Bayesian-optimization objective function
    def bayesian_objective(params, net, PDE, BC, point_sets, flags, inv_params, test_in, test_out):
        """Score one hyper-parameter vector for Bayesian optimization.

        Runs a short (100-iteration) training with the candidate learning
        rate / loss weights, then returns the weighted loss plus L2 and
        out-of-range penalties on the hyper-parameters.

        Args:
            params: [lr, w_bc, w_ic, w_pde, w_data, entropy_w, energy_w].
            net, PDE, BC, point_sets, flags, inv_params, test_in, test_out:
                forwarded unchanged to ``train``.

        Returns:
            Scalar objective value (lower is better).
        """
        lr = params[0]
        w_bc, w_ic, w_pde, w_data = params[1:5]
        entropy_weight = params[5]
        energy_weight = params[6]

        # Short training run to evaluate this candidate.
        _, _, pde_loss, bc_loss, ic_loss, data_loss = train(
            net,
            lambda x, y, z, t, net=net: PDE(x, y, z, t, net),
            BC,
            point_sets,
            flags,
            iterations=100,
            lr=lr,
            info_num=100,
            w=[w_bc, w_ic, w_pde, w_data],
            inv_params=inv_params,
            entropy_weight_init=entropy_weight,
            energy_weight=energy_weight,
            test_in=test_in,
            test_out=test_out,
        )

        # Weighted sum of the component losses.
        total_loss = (w_pde * np.sum(pde_loss) + w_bc * np.sum(bc_loss)
                      + w_ic * np.sum(ic_loss) + w_data * np.sum(data_loss))

        # L2 penalties on the learning rate and the loss weights.
        l2_penalty = 1e-4 * lr ** 2
        l2_weight_penalty = 1e-4 * (w_bc ** 2 + w_ic ** 2 + w_pde ** 2 + w_data ** 2)

        # Quadratic penalties when the learning rate leaves its preferred range.
        penalty_lr = 0
        if lr > 1e-2:
            penalty_lr += (lr - 1e-2) ** 2
        if lr < 1e-5:
            penalty_lr += (lr - 1e-5) ** 2

        # Quadratic penalties for weights above 1.0.
        penalty_weights = 0
        for w_i in (w_bc, w_ic, w_pde, w_data):
            if w_i > 1.0:
                penalty_weights += (w_i - 1.0) ** 2

        # Objective = weighted loss + all regularization terms.
        return total_loss + l2_penalty + l2_weight_penalty + penalty_lr + penalty_weights


# Define the Bayesian-optimization search space
    space = [
        Real(1e-5, 1e-2, name='lr'),        # learning rate
        Real(1E-4, 1.0, name='w_bc'),       # BC loss weight
        Real(1E-4, 1.0, name='w_ic'),       # IC loss weight
        Real(1E-4, 1.0, name='w_pde'),      # PDE loss weight
        Real(1E-4, 1.0, name='w_data'),     # data loss weight
        Real(1E-4, 1.0, name='entropy_w'),  # entropy weight
        Real(1E-4, 1.0, name='energy_w'),   # energy weight
    ]
    # Fix: honor --device instead of the previously hard-coded "cuda:1"
    # (the default is still 'cuda:1', so behavior is unchanged by default).
    device = torch.device(args.device)

    # Network and training data.
    net = FNN([4, 64, 64, 64, 1], activation=nn.Tanh(), in_tf=input_transform, out_tf=output_transform)
    net.to(device)
    if args.pretrain != 'None':
        net.load_state_dict(torch.load(args.pretrain))

    point_sets, flags = generate_points([], [])
    if args.data != 'None':
        point_sets, flags = load_data(point_sets, flags, args.data, args.data_num)

    # Optional inverse parameters (calibrated during training).
    inv_params = []
    if args.calib_eta:
        eta = torch.tensor(1e-5, requires_grad=True, device=device)
        inv_params.append(eta)

    if args.calib_material:
        Cp = torch.tensor(1e-5, requires_grad=True, device=device)
        inv_params.append(Cp)
        k = torch.tensor(1e-5, requires_grad=True, device=device)
        inv_params.append(k)

    # Bayesian optimization over lr / loss weights / entropy & energy weights.
    objective = partial(
        bayesian_objective, net=net, PDE=PDE, BC=BC, point_sets=point_sets,
        flags=flags, inv_params=inv_params, test_in=test_in, test_out=test_out
    )

    print("Starting Bayesian Optimization...")
    res = gp_minimize(objective, space, n_calls=10, random_state=42)

    # Optimization results.
    best_lr = res.x[0]
    best_weights = res.x[1:5]
    best_entropy_w = res.x[5]
    best_energy_w = res.x[6]
    print(f"Best Learning Rate: {best_lr}")
    print(f"Best Weights: {best_weights}")
    print(f"Best Entropy Weight: {best_entropy_w}")
    print(f"Best Energy Weight: {best_energy_w}")

    # Full-length training with the optimized hyper-parameters.
    l_history, err_history, pde_loss_history, bc_loss_history, ic_loss_history, data_loss_history = train(
        net,
        lambda x, y, z, t, net=net: PDE(x, y, z, t, net),
        BC,
        point_sets,
        flags,
        iterations=30000,  # full training run
        lr=best_lr,
        info_num=100,
        w=best_weights,
        inv_params=inv_params,
        entropy_weight_init=best_entropy_w,
        energy_weight=best_energy_w,
        test_in=test_in,
        test_out=test_out,
        warmup_steps=100  # e.g. Adam for the first steps, then Lion
    )

    # NOTE(review): '...' paths are placeholders — replace before running.
    # data_loss_history is returned but never saved; confirm whether wanted.
    torch.save(net.state_dict(), '.../PINN.pt')
    np.save('.../total_loss.npy', l_history)
    np.save('.../total_err.npy', err_history)
    np.save('.../pde_loss.npy', pde_loss_history)
    np.save('.../bc_loss.npy', bc_loss_history)
    np.save('.../ic_loss.npy', ic_loss_history)
    # Log-scale loss curves.
    plt.figure(figsize=(10, 6))
    plt.plot(np.log(l_history), label='Total Loss', color='b')
    plt.plot(np.log(pde_loss_history), label='PDE Loss', color='r')
    plt.plot(np.log(bc_loss_history), label='BC Loss', color='g')
    plt.plot(np.log(ic_loss_history), label='IC Loss', color='y')
    plt.xlabel('Epochs')
    plt.ylabel('Log Loss')
    plt.legend()
    plt.grid(True)
    plt.savefig('loss_curves_log_scale.png')
import torch
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
import torch
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import torch
import numpy as np
import time
import numpy as np
import torch
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import numpy as np
import torch
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import matplotlib.pyplot as plt
import numpy as np
import torch
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import numpy as np
import torch
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
# Set the Matplotlib backend to Agg (save figures without displaying them)
import torch
import numpy as np
import matplotlib
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import matplotlib.pyplot as plt

matplotlib.use('Agg')  # non-interactive backend: figures are saved, never shown
plt.rcParams['font.family'] = 'Times New Roman'
plt.rcParams['font.size'] = 18
# NOTE(review): device is hard-coded here (unlike the argparse --device above)
device = torch.device("cuda:1")
import torch
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import numpy as np
import torch
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
import os
import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import os
import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import os
import numpy as np
import torch
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

import os
import numpy as np
import torch
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import os
import numpy as np
import torch
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import os
import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import torch
import numpy as np
import numpy as np
import torch
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import os
import numpy as np
import torch
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import r2_score
from scipy.stats import pearsonr
from scipy.special import kl_div
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import r2_score
from scipy.stats import pearsonr
from scipy.special import kl_div
from scipy.stats import entropy
from scipy.interpolate import make_interp_spline  # needed for spline smoothing below
matplotlib.use('Agg')  # non-interactive backend: figures are saved, never shown
plt.rcParams['font.family'] = 'Times New Roman'
plt.rcParams['font.size'] =16
# Device and model for evaluation
# NOTE(review): device is hard-coded and '.../PINN.pt' is a placeholder path.
device = torch.device("cuda:1")
net = FNN([4, 64, 64, 64, 1], activation=nn.Tanh(), in_tf=input_transform, out_tf=output_transform)
net.to(device)
net.load_state_dict(torch.load('.../PINN.pt'))

import numpy as np
import torch
import matplotlib.pyplot as plt
import pandas as pd
from scipy.interpolate import interp1d
import pandas as pd
import numpy as np
from scipy.interpolate import CubicSpline
# Pyrometer measurement placeholders — fill in real arrays before use.
t_measured = ...  # measured time points (1-40 s)
T_measured = ...  # measured temperatures (fixed: `....` was a SyntaxError)
class DynamicTemperatureCalibrator:
    """Dynamically calibrates a predicted temperature field against pyrometer data.

    Keeps a sliding window of raw calibration factors (measured / predicted
    peak temperature), smooths them with an exponentially weighted average,
    and blends gradients of the original and calibrated fields when applying
    the correction.
    """

    def __init__(self, window_size=3, smooth_factor=0.7):
        self.window_size = window_size        # sliding-window length for raw factors
        self.smooth_factor = smooth_factor    # exponential-smoothing weight (mutated per call)
        self.calib_factors = []               # recent raw calibration factors
        self.last_valid_factor = 1.0          # fallback when interpolation fails

        # Natural cubic spline through the pyrometer measurements.
        # NOTE(review): relies on the module-level t_measured/T_measured, which
        # are Ellipsis placeholders in this file — must be real arrays before use.
        self.pyro_interp = CubicSpline(t_measured, T_measured, bc_type='natural')

    def get_calibration_factor(self, t_current, T_pred_max):
        """Return (calibration factor, measured temperature) for time ``t_current``.

        Falls back to the last valid factor (with measured value None) if the
        interpolation or smoothing fails.
        """
        try:
            # Stronger smoothing early in the process (t < 15 s).
            self.smooth_factor = 0.95 - 0.25 * (t_current / 15) if t_current < 15 else 0.7
            # Pyrometer measurement at the current time.
            T_mea = float(self.pyro_interp(t_current))

            # Raw correction factor (epsilon guards against division by zero).
            raw_factor = T_mea / (T_pred_max + 1e-6)

            # Sliding-window history of raw factors.
            self.calib_factors.append(raw_factor)
            if len(self.calib_factors) > self.window_size:
                self.calib_factors.pop(0)

            # Exponentially weighted average over the window.
            weights = np.exp(np.linspace(0, 1, len(self.calib_factors)))
            weights /= weights.sum()
            smooth_factor = np.dot(self.calib_factors, weights)

            # Smooth transition from the previous valid factor.
            final_factor = (self.smooth_factor * smooth_factor +
                            (1 - self.smooth_factor) * self.last_valid_factor)

            # Clamp the correction: looser early, tighter later.
            if t_current < 15:
                final_factor = np.clip(final_factor, 0.5, 2.0)
            else:
                final_factor = np.clip(final_factor, 0.8, 1.5)

            self.last_valid_factor = final_factor
            return final_factor, T_mea
        except Exception:
            # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit; narrowed so those still propagate.
            return self.last_valid_factor, None

    def apply_calibration(self, temp_field, calib_factor):
        """Scale ``temp_field`` by ``calib_factor`` and blend x/z gradients.

        Expects a 3D (x, y, z) field; a 2D (x, z) field gets a singleton y
        axis inserted, and a 1D field raises ValueError.
        """
        calibrated = temp_field * calib_factor

        # Normalize the field to 3D (x, y, z).
        if temp_field.ndim == 1:
            raise ValueError("temp_field is unexpectedly 1D. Check input dimensions.")
        elif temp_field.ndim == 2:  # likely (x, z): insert the y axis
            temp_field = temp_field[:, np.newaxis, :]

        # Make the calibrated field match the (possibly expanded) shape.
        calibrated = calibrated.reshape(temp_field.shape)

        # Gradients along x and z for both fields.
        grad_x_ori = np.gradient(temp_field, axis=0, edge_order=2)
        grad_z_ori = np.gradient(temp_field, axis=2, edge_order=2)
        grad_x_cal = np.gradient(calibrated, axis=0, edge_order=2)
        grad_z_cal = np.gradient(calibrated, axis=2, edge_order=2)

        # Blend gradients, mostly preserving the original field's direction.
        alpha = 0.3
        grad_x = alpha * grad_x_cal + (1 - alpha) * grad_x_ori
        grad_z = alpha * grad_z_cal + (1 - alpha) * grad_z_ori

        # Re-inject a fraction of the blended gradients (x and z only).
        calibrated += 0.1 * (grad_x + grad_z)

        return calibrated


# Initialize the calibrator
calibrator = DynamicTemperatureCalibrator(window_size=5, smooth_factor=0.7)

# Load the trained model
# NOTE(review): '.../PINN.pt' is a placeholder path, and t_measured/T_measured
# above are Ellipsis placeholders — this section cannot run as-is; fill in
# real values first.
net = FNN([4, 64, 64, 64, 1], nn.Tanh(), in_tf=input_transform, out_tf=output_transform)
net.to(device)
net.load_state_dict(torch.load('.../PINN.pt'))
