#Physics-Driven
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader, TensorDataset
import time  # For timing measurements
import pandas as pd
import os
import math
from torch.optim.lr_scheduler import StepLR
import torch.nn.functional as F

# Device configuration
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
file_path = 'processed_data.csv'

# Physical parameters -- scaled for an adult male forearm
L = 2.5  # forearm (vessel) length, ~2.5 dm
T = 2.0   # test time window (s)
A0 = 4.52e-2  # reference cross-sectional area (dm^2)
beta = 1134.37  # vessel-wall elasticity parameter
rho = 1.06 # blood density (original comment said "kg/l^3" -- presumably kg/L; verify units)
P_ext = 67.576  # external pressure (mmHg)
Kr = 0.251   # friction/resistance coefficient
R1, R2 = 87.85, 8.41e2  # Windkessel parameters (resistances used in the outlet BC)
C = 1.345e-2     # Windkessel compliance
delta = 0.75 # cardiac period (s); original note claims ~77.1 bpm (60/0.75 = 80 bpm -- verify)
sample_rate = 125  # Hz
train_point = 7000 # training-start offset (s); original note: PPG shows periodicity from ~2800 s onward
test_point = 98  # test-start offset (s) -- appears unused in this chunk



class PeakAttention(nn.Module):
    """Attention gate that re-weights features around pulse peaks/troughs.

    A single 1-D convolution collapses all channels into one attention map,
    which is squashed to (0, 1) and applied as a per-timestep multiplicative
    gate shared across channels.
    """

    def __init__(self, channels):
        super().__init__()
        # (B, channels, T) -> (B, 1, T) attention logits.
        self.conv = nn.Conv1d(channels, 1, kernel_size=3, padding=1)

    def forward(self, x):
        # Broadcast the shared sigmoid gate over every channel.
        gate = self.conv(x).sigmoid()
        return gate * x

class DilatedResidualConvBlock(nn.Module):
    """Stack of Conv1d->BatchNorm->ReLU units with a residual shortcut.

    The first `num_plain` convolutions are plain (dilation 1); each later one
    uses dilation dilation_base**(position - num_plain), with padding chosen
    so that kernel_size=3 never changes the temporal length.  The input is
    added back through a 1x1 projection whenever the channel count differs.
    """

    def __init__(self, in_channels, out_channels, repeat=4, dilation_base=2, num_plain=1):
        super().__init__()
        units = []
        for idx in range(repeat):
            # dilation == padding keeps the sequence length for k=3.
            d = 1 if idx < num_plain else dilation_base ** (idx - num_plain)
            units.append(nn.Sequential(
                nn.Conv1d(
                    in_channels if idx == 0 else out_channels,
                    out_channels,
                    kernel_size=3,
                    padding=d,
                    dilation=d,
                ),
                nn.BatchNorm1d(out_channels),
                nn.ReLU(),
            ))
        self.blocks = nn.Sequential(*units)
        if in_channels == out_channels:
            self.shortcut = nn.Identity()
        else:
            self.shortcut = nn.Conv1d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        residual = self.shortcut(x)
        out = self.blocks(x)
        return out + residual

class BranchNet(nn.Module):
    """Branch net of the DeepONet: encodes the 2-channel (PPG, ECG) signal.

    A pointwise conv lifts the input to 64 channels, four dilated residual
    stages widen it to 512, a peak-attention gate emphasises pulse extrema,
    a 2-layer BiLSTM adds temporal context, and a linear head maps the
    time-pooled features to the two branch coefficients (b1, b2).
    """

    def __init__(self, in_channels=2):
        super().__init__()
        # Initial pointwise feature lift.
        self.prep = nn.Conv1d(in_channels, 64, kernel_size=1)

        # Progressively wider dilated residual stages.
        self.stage1 = DilatedResidualConvBlock(64, 64, repeat=3, dilation_base=2, num_plain=1)
        self.stage2 = DilatedResidualConvBlock(64, 128, repeat=4, dilation_base=2, num_plain=1)
        self.stage3 = DilatedResidualConvBlock(128, 256, repeat=4, dilation_base=2, num_plain=2)
        self.stage4 = DilatedResidualConvBlock(256, 512, repeat=4, dilation_base=2, num_plain=2)
        self.peak_attn = PeakAttention(512)
        # Bidirectional LSTM over the temporal axis.
        self.bilstm = nn.LSTM(512, 512, num_layers=2, batch_first=True, bidirectional=True)

        # Projection to the two branch coefficients.
        self.fc_out = nn.Linear(1024, 2)

    def forward(self, signal):  # signal: (B, C=2, T)
        feats = self.prep(signal)               # (B, 64, T)
        for stage in (self.stage1, self.stage2, self.stage3, self.stage4):
            feats = stage(feats)                # (B, 512, T) after stage4
        feats = self.peak_attn(feats)

        seq = feats.transpose(1, 2)             # (B, T, 512)
        seq, _ = self.bilstm(seq)               # (B, T, 1024)
        pooled = seq.mean(dim=1)                # global average pool -> (B, 1024)

        coeffs = self.fc_out(pooled)            # (B, 2)
        b1, b2 = coeffs.chunk(2, dim=1)         # each (B, 1)
        return b1, b2

# Trunk network (processes the spatio-temporal coordinates)
class TrunkNet(nn.Module):
    """Trunk net: maps spatio-temporal coordinates (z, t) to a basis value.

    Time is encoded with `num_freq` sine/cosine harmonics of the cardiac
    period `delta`; the encoding is concatenated with the raw (z, t) pair
    and pushed through a tanh MLP with five residual blocks.
    """

    def __init__(self, num_freq=3, hidden_dim=128):
        super().__init__()
        self.num_freq = num_freq
        # Input features: z, t, plus one sin/cos pair per harmonic.
        self.input_layer = nn.Linear(2 + 2 * num_freq, hidden_dim)
        # Five Linear-Tanh-Linear residual blocks.
        self.res_blocks = nn.Sequential(*[
            nn.Sequential(
                nn.Linear(hidden_dim, hidden_dim),
                nn.Tanh(),
                nn.Linear(hidden_dim, hidden_dim),
            )
            for _ in range(5)
        ])
        self.output_layer = nn.Sequential(nn.Linear(hidden_dim, 1))

    def forward(self, z, t, delta):
        # (B, T) -> (B, T, 1) so features concatenate on the last dim.
        t = t.unsqueeze(-1)
        z = z.unsqueeze(-1)
        omega_t = 2 * torch.pi * t / delta
        # Harmonics ordered sin(k), cos(k) for k = 1..num_freq.
        harmonics = [
            trig(k * omega_t)
            for k in range(1, self.num_freq + 1)
            for trig in (torch.sin, torch.cos)
        ]
        feats = torch.cat([z, t, *harmonics], dim=2)
        h = torch.tanh(self.input_layer(feats))
        for block in self.res_blocks:
            h = h + torch.tanh(block(h))
        return self.output_layer(h)  # (B, T, 1)

# BP-DeepONet main network
class BPDeepONet(nn.Module):
    """DeepONet combining the signal branch with the coordinate trunk.

    The branch yields two scalar coefficients per sample; the trunk yields a
    per-(z, t) basis value.  Their products give the predicted pressure P and
    flow Q fields, returned stacked on the last dimension.
    """

    def __init__(self):
        super().__init__()
        self.branch = BranchNet()
        self.trunk = TrunkNet()

    def forward(self, signal, z, t, delta):
        # Branch: physiological signals -> coefficients, (B, 1) -> (B, 1, 1).
        b1, b2 = self.branch(signal)
        b1, b2 = b1.unsqueeze(1), b2.unsqueeze(1)
        # Trunk: coordinates -> per-point basis values, (B, T, 1).
        basis = self.trunk(z, t, delta)
        # Scale the shared basis by each coefficient (bilinear pooling).
        pressure = b1 * basis
        flow = b2 * basis
        return torch.concat([pressure, flow], dim=2)  # (B, T, 2)
     
   
# Define a(P) (tube law), H(U) (flux Jacobian) and B(U) (source term)
def a(P):
    """Radius-like variable a(P) from the linear tube law.

    Inverts P = P_ext + (beta / A0) * (a - sqrt(A0)); the tiny 1e-6 offset
    guards the divisions by powers of a performed downstream.
    """
    a0 = torch.as_tensor(A0, dtype=P.dtype, device=P.device)
    # math.sqrt (not torch.sqrt): A0 is a plain Python float.
    return (a0 / beta) * (P - P_ext) + math.sqrt(A0) + 1e-6

def H(U):
    """Flux Jacobian H(U) of the 1-D (P, Q) blood-flow system.

    With U = (P, Q) and a = a(P), the system U_t + H(U) U_z = B(U) has

        H = [ 0                                  beta / (2 a A0) ]
            [ a^2/rho - 2 A0 Q^2 / (beta a^3)    2 Q / a^2       ]

    (row 0 = mass conservation, row 1 = momentum).  Returns a (B, T, 2, 2)
    tensor indexed [..., row, col], ready for bmm against dU/dz.
    """
    P, Q = U[:, :, 0], U[:, :, 1]
    a_P = a(P)
    h11, h12 = torch.zeros_like(P), beta / (2 * a_P * A0)
    h21 = (a_P**2 / rho) - (2 * A0 * Q**2) / (beta * a_P**3)
    h22 = 2 * Q / (a_P**2)

    # Bug fix: the previous stacking (inner dim=2, outer dim=3) laid the
    # entries out as [..., col, row] -- the TRANSPOSE of H -- so the PDE
    # residual multiplied dU/dz by H^T.  Stack columns on the last dim and
    # rows on the second-to-last to get [..., row, col].
    H_mat = torch.stack([
        torch.stack([h11, h12], dim=-1),  # row 0: (B, T, 2)
        torch.stack([h21, h22], dim=-1),  # row 1: (B, T, 2)
    ], dim=-2)
    return H_mat
    
def B(U):
    """Source term B(U): zero mass source and the friction term -Kr*Q/a^2."""
    P = U[:, :, 0].unsqueeze(-1)
    Q = U[:, :, 1].unsqueeze(-1)
    friction = -Kr * Q / (a(P) ** 2)
    return torch.cat([torch.zeros_like(P), friction], dim=2)

# Physics-constrained loss function
class PhysicsLoss(nn.Module):
    """Physics-informed loss for the 1-D blood-flow model.

    Combines the PDE residual, a Windkessel outlet boundary condition,
    measurement matching against the recorded ABP, and a periodicity
    constraint over one cardiac cycle.
    """

    def __init__(self):
        super().__init__()

    def forward(self, model, batch, delta):
        # Batch layout: [:, :, 0:2] PPG/ECG, [:, :, 2] measured ABP,
        # [:, :, 3] time t, [:, :, 4] axial coordinate z.
        signal = batch[:,:,0:2].to(device)
        BC_cond = batch[:,:,2].to(device)
        t = batch[:,:,3].to(device)
        z = batch[:,:,4].to(device)
        z.requires_grad_(True)
        t.requires_grad_(True)
        # Forward pass; conv branch expects (B, C, L).
        signal = signal.permute(0, 2, 1)
        U = model(signal, z, t, delta)
        P, Q = U[:,:,0], U[:,:, 1]

        # First-order derivatives via autograd (graph kept for higher-order terms).
        dPdt = torch.autograd.grad(P, t, grad_outputs=torch.ones_like(P), create_graph=True)[0].unsqueeze(-1)
        dQdt = torch.autograd.grad(Q, t, grad_outputs=torch.ones_like(Q), create_graph=True)[0].unsqueeze(-1)
        dPdz = torch.autograd.grad(P, z, grad_outputs=torch.ones_like(P), create_graph=True)[0].unsqueeze(-1)
        dQdz = torch.autograd.grad(Q, z, grad_outputs=torch.ones_like(Q), create_graph=True)[0].unsqueeze(-1)

        # --------- PDE residual: U_t + H(U) U_z - B(U) ---------
        H_U = H(U)
        dUdZ = torch.cat([dPdz, dQdz], dim=2).unsqueeze(-1)
        H_U = H_U.view(-1, 2, 2)        # (B*L, 2, 2)
        dUdZ = dUdZ.view(-1, 2, 1)      # (B*L, 2, 1)
        TMP = torch.bmm(H_U, dUdZ)      # (B*L, 2, 1)
        # Bug fix: this tuple assignment used to be `Ba, L = P.shape`, which
        # shadowed the global vessel length L (2.5 dm) with the sequence
        # length, so the outlet boundary below was placed at z = seq_len
        # (e.g. 250) instead of z = L.
        Ba, seq_len = P.shape
        TMP = TMP.view(Ba, seq_len, 2, 1).squeeze(-1)
        residual = torch.cat([dPdt, dQdt], dim=2) + TMP - B(U)
        loss_pde = torch.mean(residual**2)

        # --------- Outlet boundary (z = L): Windkessel model ---------
        z_L = torch.full_like(t, L).requires_grad_()
        t_L = t.clone().requires_grad_()
        U_L = model(signal, z_L, t_L, delta)
        P_L, Q_L = U_L[:,:,0], U_L[:,: ,1]
        dPdt_L = torch.autograd.grad(P_L, t_L, grad_outputs=torch.ones_like(P_L), create_graph=True)[0]
        dQdt_L = torch.autograd.grad(Q_L, t_L, grad_outputs=torch.ones_like(Q_L), create_graph=True)[0]

        windkessel = Q_L*(1+R1/R2) + C*R1*dQdt_L - (P_L - P_ext)/R2 - C*dPdt_L
        loss_bc = torch.mean(windkessel**2)

        # Measurement matching.  NOTE(review): the original comment labelled
        # this the inlet (z = 0) condition, but it compares the *outlet*
        # prediction P_L with the measured ABP -- confirm which end the ABP
        # measurement corresponds to.
        loss_meas = torch.mean((P_L - BC_cond)**2)

        # --------- Periodicity: U(z, t) == U(z, t + delta) ---------
        t_shift = t + delta
        U_shift = model(signal, z, t_shift, delta)
        loss_period = torch.mean((U - U_shift)**2)

        # Weighted total.
        loss = (loss_pde +
            100.0 * loss_bc +
            100.0 * loss_meas +
            10.0 * loss_period)

        return {
            "total": loss,
            "pde": loss_pde,
            "bc": loss_bc,
            "meas": loss_meas,
            "period": loss_period
        }



# Data loading (windowing of the CSV recording)
def generate_data(N=375000, sample_start_point=0, window_length=250):
    """Load the CSV recording and cut it into fixed-length windows.

    Parameters
    ----------
    N : int
        Number of samples to read (at `sample_rate` Hz).
    sample_start_point : int
        Offset into the recording, in seconds.
    window_length : int
        Samples per window; a trailing remainder shorter than this is dropped.

    Returns a (B, window_length, 5) float32 tensor with columns
    [ppg, ecg, abp, t, z], where z spans [0, L] across each window.
    """
    # Consistency fix: use the module-level file_path instead of repeating
    # the hard-coded 'processed_data.csv' literal (same value).
    # Column layout assumed: [_, ppg, _, abp, ecg] -- verify against the CSV.
    df = pd.read_csv(file_path, skiprows=2, usecols=range(5))
    S = sample_start_point * sample_rate
    torch_data = torch.from_numpy(df.values).float()

    ppg = torch_data[S:S+N, 1:2]
    abp = torch_data[S:S+N, 3:4]
    ecg = torch_data[S:S+N, 4:5]

    t = torch.linspace(0, N / sample_rate, N).unsqueeze(-1)
    z_dummy = torch.zeros_like(t)  # placeholder; z column is filled below

    data = torch.concat([ppg, ecg, abp, t, z_dummy], dim=1)

    # Non-overlapping windows.
    all_segments = [
        data[i:i + window_length]
        for i in range(0, len(data) - window_length + 1, window_length)
    ]

    # Bug fix: torch.tensor(np.array(list_of_tensors)) forced a needless
    # tensor -> numpy -> tensor round trip; torch.stack stays in torch.
    final_tensor = torch.stack(all_segments)  # (B, window_length, 5)
    print(f"数据集大小: {final_tensor.shape}")

    # Fill the z column: each window spans the vessel from 0 to L.
    # (Local renamed B -> B_ to stop shadowing the module-level B(U).)
    B_, L_tmp = final_tensor.shape[:2]
    final_tensor[:, :, 4] = torch.linspace(0, L, steps=L_tmp).repeat(B_, 1)

    return final_tensor
    



# ---------------- Training loop ----------------
model = BPDeepONet().to(device)
compute_loss = PhysicsLoss()
optimizer = optim.Adam(model.parameters(), lr=2e-4)
scheduler = StepLR(optimizer, step_size=5, gamma=0.9)

train_data = generate_data(64000, sample_start_point=train_point, window_length=250)
dataloader = DataLoader(train_data, batch_size=64, shuffle=True)

# The visualization window is loop-invariant: load it once up front instead
# of re-reading the CSV on every plotting epoch.
test_data = generate_data(250, sample_start_point=train_point, window_length=250)
t_test = test_data[:, :, 3].to(device)
z_test = torch.full_like(t_test, fill_value=L / 2)  # probe the mid-vessel point
signal_test = test_data[:, :, 0:2].permute(0, 2, 1).to(device)  # (B, C, L)

# Timing bookkeeping.
start_time = time.time()
last_epoch_time = start_time

model.train()
for epoch in range(50000):
    total_loss = total_pde = total_bc = total_meas = total_period = 0.0
    batch_count = 0

    for batch in dataloader:
        optimizer.zero_grad()
        loss_dict = compute_loss(model, batch, delta)
        loss = loss_dict["total"]
        loss.backward()
        optimizer.step()

        # Accumulate per-term sums for epoch-average logging.
        total_loss += loss.item()
        total_pde += loss_dict['pde'].item()
        total_bc += loss_dict['bc'].item()
        total_meas += loss_dict['meas'].item()
        total_period += loss_dict['period'].item()
        batch_count += 1

    # Decay the learning rate only during the first 500 epochs.
    if epoch < 500:
        scheduler.step()

    # Log averaged losses and save a diagnostic plot every 25 epochs
    # (the original comment claimed 100, but the code checks 25).
    if epoch % 25 == 0:
        current_time = time.time()
        total_duration = current_time - start_time

        print(f"Epoch {epoch}, "
              f"Total Loss: {total_loss / batch_count:.4f}, "
              f"PDE: {total_pde / batch_count:.6f}, "
              f"BC: {total_bc / batch_count:.6f}, "
              f"Meas: {total_meas / batch_count:.6f}, "
              f"Period: {total_period / batch_count:.6f}, "
              f"Time: {total_duration:.2f}s")

        # NOTE(review): inference runs with the model still in train mode, so
        # BatchNorm uses batch statistics here -- confirm that is intended
        # (model.eval()/model.train() bracketing would change the plots).
        with torch.no_grad():
            U_pred = model(signal_test, z_test, t_test, delta)

        P_pred = U_pred[0, :, 0].detach().cpu().numpy()
        P_true = test_data[0, :, 2].cpu().numpy()  # column 2 is the measured ABP
        t_axis = torch.linspace(0, 2, P_pred.shape[0]).numpy()  # 2 s, 250 points

        # Bug fix: the original opened an extra figure(figsize=(18, 6)) that
        # was never drawn on or closed, leaking one figure per logging epoch.
        plt.figure(figsize=(10, 4))
        plt.plot(t_axis, P_true, label='2.5@P_true')
        plt.plot(t_axis, P_pred, '--', label='1.25@P_pred')
        plt.xlabel("Time (s)")
        plt.ylabel("Pressure (mmHg)")
        plt.title("Predicted vs True Pressure")
        plt.legend()
        plt.grid(True)
        plt.tight_layout()
        save_dir = "experimental_results"
        os.makedirs(save_dir, exist_ok=True)
        save_path = os.path.join(save_dir, f"pressure_epoch_{epoch}.png")
        plt.savefig(save_path)
        plt.close()