{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5cc8c40e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using device: cuda\n",
      "PCG data shape: (55020, 2000)\n",
      "ECG data shape: (55020, 2000)\n",
      "Sampled PCG data shape: torch.Size([5000, 2000])\n",
      "Sampled ECG data shape: torch.Size([5000, 2000])\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\Einstein\\AppData\\Local\\Temp\\ipykernel_29184\\2781091537.py:113: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.\n",
      "  model.load_state_dict(torch.load(save_path, map_location=device))\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "✅ Loaded existing model weights from 'seq2seq_best_model.pt'\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from torch.utils.data import DataLoader, TensorDataset, random_split\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "from pathlib import Path\n",
    "import pandas as pd\n",
    "from datetime import datetime\n",
    "\n",
    "# Select GPU when available; model and all tensors are moved to this device.\n",
    "device = 'cuda' if torch.cuda.is_available() else 'cpu'\n",
    "print(f\"Using device: {device}\")\n",
    "\n",
    "# ---------- Model ----------\n",
    "\n",
    "class Seq2SeqModel(nn.Module):\n",
    "    \"\"\"Conv1d -> GRU -> TransformerEncoder sequence-to-sequence model.\n",
    "\n",
    "    Maps an input sequence of shape (B, L) to an output sequence of the\n",
    "    same shape. The convolution stack uses exponentially increasing\n",
    "    dilation to widen the receptive field while 'same'-style padding\n",
    "    keeps the sequence length unchanged.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, input_dim=1, conv_channels=64, conv_kernel_size=7, conv_layers=3,\n",
    "                 gru_hidden=128, gru_layers=2, transformer_heads=4, transformer_layers=4,\n",
    "                 output_dim=1, dropout=0.1):\n",
    "        super().__init__()\n",
    "\n",
    "        # 1-D convolution stack with dilation 1, 2, 4, ... per layer.\n",
    "        convs = []\n",
    "        for i in range(conv_layers):\n",
    "            in_ch = input_dim if i == 0 else conv_channels\n",
    "            dilation = 2 ** i\n",
    "            # Padding chosen so an odd kernel size preserves sequence length.\n",
    "            padding = ((conv_kernel_size - 1) * dilation) // 2\n",
    "            convs.append(nn.Conv1d(in_ch, conv_channels, conv_kernel_size,\n",
    "                                   dilation=dilation, padding=padding))\n",
    "            convs.append(nn.ReLU())\n",
    "        self.conv_layers = nn.Sequential(*convs)\n",
    "\n",
    "        # GRU over the conv features (batch_first: tensors are (B, L, C)).\n",
    "        self.gru = nn.GRU(input_size=conv_channels,\n",
    "                          hidden_size=gru_hidden,\n",
    "                          num_layers=gru_layers,\n",
    "                          batch_first=True)\n",
    "\n",
    "        # Transformer encoder refining the GRU outputs.\n",
    "        encoder_layer = nn.TransformerEncoderLayer(\n",
    "            d_model=gru_hidden,\n",
    "            nhead=transformer_heads,\n",
    "            dim_feedforward=gru_hidden*4,\n",
    "            dropout=dropout,\n",
    "            activation='relu',\n",
    "            batch_first=True)\n",
    "        self.transformer_encoder = nn.TransformerEncoder(encoder_layer,\n",
    "                                                         num_layers=transformer_layers)\n",
    "\n",
    "        # Per-timestep linear projection to the output dimension.\n",
    "        self.output_layer = nn.Linear(gru_hidden, output_dim)\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"Forward pass: x is a (B, L) float tensor; returns (B, L).\"\"\"\n",
    "        # (B, L) -> (B, 1, L): add the channel axis expected by Conv1d.\n",
    "        x = x.unsqueeze(1)  # (B, 1, L)\n",
    "        \n",
    "        # Conv stack: (B, 1, L) -> (B, conv_channels, L)\n",
    "        x = self.conv_layers(x)  \n",
    "        \n",
    "        # Back to batch_first sequence layout for the GRU/Transformer.\n",
    "        x = x.permute(0, 2, 1)  # (B, L, conv_channels)\n",
    "\n",
    "        # GRU layer\n",
    "        gru_out, _ = self.gru(x)  # (B, L, gru_hidden)\n",
    "\n",
    "        # Transformer encoder\n",
    "        transformer_out = self.transformer_encoder(gru_out)  # (B, L, gru_hidden)\n",
    "\n",
    "        # Project each timestep, then drop the trailing channel axis.\n",
    "        out = self.output_layer(transformer_out)  # (B, L, 1)\n",
    "        return out.squeeze(-1)  # (B, L)\n",
    "\n",
    "# ---------- Residual + finite-difference loss ----------\n",
    "\n",
    "def residual_diff_loss(input, output, target, diff_weight=0.5):\n",
    "    \"\"\"Reconstruction loss with first/second finite-difference terms.\n",
    "\n",
    "    The model predicts a residual, so the reconstructed sequence is\n",
    "    ``input + output``. The total loss is the MSE of the reconstruction\n",
    "    against ``target`` plus ``diff_weight`` times the MSE of the first-\n",
    "    and second-order finite differences, encouraging the prediction to\n",
    "    match the target's local slope and curvature.\n",
    "    \"\"\"\n",
    "    reconstruction = input + output\n",
    "\n",
    "    def first_diff(seq):\n",
    "        # First-order finite difference along the time axis.\n",
    "        return seq[:, 1:] - seq[:, :-1]\n",
    "\n",
    "    def second_diff(seq):\n",
    "        # Second-order finite difference along the time axis.\n",
    "        return seq[:, 2:] - 2 * seq[:, 1:-1] + seq[:, :-2]\n",
    "\n",
    "    base_term = F.mse_loss(reconstruction, target)\n",
    "    slope_term = F.mse_loss(first_diff(reconstruction), first_diff(target))\n",
    "    curvature_term = F.mse_loss(second_diff(reconstruction), second_diff(target))\n",
    "\n",
    "    return base_term + diff_weight * (slope_term + curvature_term)\n",
    "\n",
    "# ---------- Training ----------\n",
    "\n",
    "def train_model_with_best_save(input_tensor, target_tensor, epochs=100, batch_size=32,\n",
    "                               lr=1e-4, val_ratio=0.9, save_path='best_model.pt',\n",
    "                               diff_weight=1.0, load_best=True):\n",
    "    \"\"\"Train a Seq2SeqModel, checkpointing whenever validation loss improves.\n",
    "\n",
    "    Args:\n",
    "        input_tensor: (N, L) input sequences (PCG here).\n",
    "        target_tensor: (N, L) target sequences (ECG here).\n",
    "        epochs, batch_size, lr: standard optimization settings.\n",
    "        val_ratio: fraction of the dataset placed in the VALIDATION split.\n",
    "            NOTE(review): the default 0.9 puts 90% of the data in\n",
    "            validation and only 10% in training -- confirm this is intended.\n",
    "        save_path: checkpoint path for the best (lowest val loss) weights.\n",
    "        diff_weight: weight of the derivative terms in residual_diff_loss.\n",
    "        load_best: if True and save_path exists, resume from those weights.\n",
    "\n",
    "    Returns:\n",
    "        The trained model (last-epoch weights; best weights are on disk).\n",
    "    \"\"\"\n",
    "\n",
    "    dataset = TensorDataset(input_tensor, target_tensor)\n",
    "    val_size = int(len(dataset) * val_ratio)\n",
    "    train_size = len(dataset) - val_size\n",
    "    train_dataset, val_dataset = random_split(dataset, [train_size, val_size])\n",
    "\n",
    "    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\n",
    "    val_loader = DataLoader(val_dataset, batch_size=batch_size)\n",
    "\n",
    "    model = Seq2SeqModel(input_dim=1, output_dim=1).to(device)\n",
    "    optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n",
    "\n",
    "    # ========== Resume from an existing checkpoint, if any ==========\n",
    "    # weights_only=True restricts unpickling to tensors/containers, avoiding\n",
    "    # arbitrary code execution and the torch.load FutureWarning seen in the\n",
    "    # cell output. A plain state_dict loads fine under this mode.\n",
    "    best_val_loss = float('inf')\n",
    "    if load_best and Path(save_path).exists():\n",
    "        model.load_state_dict(torch.load(save_path, map_location=device,\n",
    "                                         weights_only=True))\n",
    "        print(f\"✅ Loaded existing model weights from '{save_path}'\")\n",
    "        # NOTE(review): best_val_loss is still inf here, so the first epoch\n",
    "        # always overwrites the checkpoint even if it is worse than the\n",
    "        # weights just loaded.\n",
    "\n",
    "    # Per-epoch loss history (also dumped to CSV at the end).\n",
    "    train_losses = []\n",
    "    val_losses = []\n",
    "\n",
    "    # ========== Training loop ==========\n",
    "    for epoch in range(epochs):\n",
    "        model.train()\n",
    "        total_train_loss = 0.0\n",
    "\n",
    "        for batch_x, batch_y in train_loader:\n",
    "            batch_x = batch_x.to(device)\n",
    "            batch_y = batch_y.to(device)\n",
    "\n",
    "            optimizer.zero_grad()\n",
    "            residual = model(batch_x)  # (B, L): the model predicts a residual\n",
    "            loss = residual_diff_loss(batch_x, residual, batch_y, diff_weight)\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "            # Weight by batch size so the epoch average is per-sample.\n",
    "            total_train_loss += loss.item() * batch_x.size(0)\n",
    "\n",
    "        avg_train_loss = total_train_loss / train_size\n",
    "\n",
    "        # ========== Validation ==========\n",
    "        model.eval()\n",
    "        total_val_loss = 0.0\n",
    "        with torch.no_grad():\n",
    "            for val_x, val_y in val_loader:\n",
    "                val_x = val_x.to(device)\n",
    "                val_y = val_y.to(device)\n",
    "                residual = model(val_x)\n",
    "                val_loss = residual_diff_loss(val_x, residual, val_y, diff_weight)\n",
    "                total_val_loss += val_loss.item() * val_x.size(0)\n",
    "\n",
    "        avg_val_loss = total_val_loss / val_size\n",
    "\n",
    "        # Record this epoch's losses.\n",
    "        train_losses.append(avg_train_loss)\n",
    "        val_losses.append(avg_val_loss)\n",
    "\n",
    "        print(f\"Epoch {epoch+1}/{epochs} | Train Loss: {avg_train_loss:.6f} | Val Loss: {avg_val_loss:.6f}\")\n",
    "\n",
    "        # Checkpoint on improvement.\n",
    "        if avg_val_loss < best_val_loss:\n",
    "            best_val_loss = avg_val_loss\n",
    "            torch.save(model.state_dict(), save_path)\n",
    "            print(f\"  ✅ New best model saved at '{save_path}' with val loss: {best_val_loss:.6f}\")\n",
    "\n",
    "    # Dump the loss history to a timestamped CSV.\n",
    "    current_time = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n",
    "    csv_filename = f\"training_losses_{current_time}.csv\"\n",
    "    loss_df = pd.DataFrame({\n",
    "        \"Epoch\": range(1, epochs + 1),\n",
    "        \"Train Loss\": train_losses,\n",
    "        \"Val Loss\": val_losses\n",
    "    })\n",
    "    loss_df.to_csv(csv_filename, index=False)\n",
    "    print(f\"✅ Losses saved to {csv_filename}\")\n",
    "\n",
    "    print(\"✅ Training complete.\")\n",
    "    return model\n",
    "\n",
    "# ---------- Main script ----------\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    # Paths to the pre-generated .npy slice files.\n",
    "    # NOTE(review): hardcoded absolute Windows paths; consider a\n",
    "    # configurable DATA_DIR instead.\n",
    "    pcg_file_path = r'F:\\datasheet\\03时间序列分类\\13PCG_ECG\\ephnogram-a-simultaneous-electrocardiogram-and-phonocardiogram-database-1.0.0\\MAT\\all_PCG_resampled_slices.npy'\n",
    "    ecg_file_path = r'F:\\datasheet\\03时间序列分类\\13PCG_ECG\\ephnogram-a-simultaneous-electrocardiogram-and-phonocardiogram-database-1.0.0\\MAT\\all_ECG_resampled_slices.npy'\n",
    "\n",
    "    # Load data: PCG slices are the model input, ECG slices the target.\n",
    "    datax = np.load(pcg_file_path).astype(np.float32)\n",
    "    datay = np.load(ecg_file_path).astype(np.float32)\n",
    "\n",
    "    print(f\"PCG data shape: {datax.shape}\")\n",
    "    print(f\"ECG data shape: {datay.shape}\")\n",
    "\n",
    "    # Randomly draw num_samples examples without replacement.\n",
    "    # NOTE(review): no random seed is set, so the subset differs per run.\n",
    "    num_samples = 5000\n",
    "    if num_samples > datax.shape[0]:\n",
    "        raise ValueError(f\"Requested {num_samples} samples, but only {datax.shape[0]} samples available.\")\n",
    "\n",
    "    indices = np.random.choice(datax.shape[0], size=num_samples, replace=False)\n",
    "    datax_sampled = datax[indices]\n",
    "    datay_sampled = datay[indices]\n",
    "\n",
    "    # Convert to torch tensors on the target device.\n",
    "    # BUGFIX: these tensors were previously overwritten immediately below\n",
    "    # with the FULL dataset, silently discarding the sampling above; the\n",
    "    # sampled subset is now actually used for training.\n",
    "    input_tensor = torch.tensor(datax_sampled, dtype=torch.float32).to(device)\n",
    "    target_tensor = torch.tensor(datay_sampled, dtype=torch.float32).to(device)\n",
    "\n",
    "    print(f\"Sampled PCG data shape: {input_tensor.shape}\")\n",
    "    print(f\"Sampled ECG data shape: {target_tensor.shape}\")\n",
    "\n",
    "    # Train; best weights are checkpointed to save_path during training.\n",
    "    model = train_model_with_best_save(input_tensor, target_tensor,\n",
    "                                       epochs=1000,\n",
    "                                       batch_size=128,\n",
    "                                       lr=1e-5,\n",
    "                                       save_path='seq2seq_best_model.pt',\n",
    "                                       diff_weight=1.2)\n",
    "\n",
    "    # Reload the best checkpoint for inference.\n",
    "    # weights_only=True avoids pickle-based code execution on load.\n",
    "    best_model = Seq2SeqModel(input_dim=1).to(device)\n",
    "    best_model.load_state_dict(torch.load('seq2seq_best_model.pt',\n",
    "                                          map_location=device, weights_only=True))\n",
    "    best_model.eval()\n",
    "\n",
    "    # Inference + visualization on one sample.\n",
    "    with torch.no_grad():\n",
    "        idx = 1000  # inspect sample #1000\n",
    "        test_input = input_tensor[idx].unsqueeze(0)  # (1, L)\n",
    "        residual = best_model(test_input)            # (1, L)\n",
    "        # The model predicts a residual; add it back onto the input.\n",
    "        pred = test_input + residual                 # (1, L)\n",
    "\n",
    "        plt.figure(figsize=(12, 5))\n",
    "        plt.plot(pred.squeeze().cpu().numpy(), label='Predicted + Residual')\n",
    "        plt.plot(target_tensor[idx].cpu().numpy(), label='Target', alpha=0.7)\n",
    "        plt.title('Prediction vs Target')\n",
    "        plt.legend()\n",
    "        plt.tight_layout()\n",
    "        plt.show()\n",
    "\n",
    "    # Show a raw PCG sample and the predicted ECG for reference.\n",
    "    plt.figure()\n",
    "    plt.plot(datax[0])\n",
    "    plt.title('PCG Data Sample')\n",
    "    plt.show()\n",
    "\n",
    "    plt.figure()\n",
    "    plt.plot(pred.cpu().numpy().flatten())\n",
    "    plt.title('Predicted ECG Data Sample')\n",
    "    plt.show()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python (mycv)",
   "language": "python",
   "name": "mycv"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.20"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
