{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "841eb8e0",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "import matplotlib.pyplot as plt\n",
    "from torch.utils.data import DataLoader\n",
    "from torchvision import datasets, transforms\n",
    "from collections import defaultdict\n",
    "from sklearn.metrics import precision_score, recall_score, accuracy_score\n",
    "import numpy as np\n",
    "from tqdm.notebook import tqdm\n",
    "import os\n",
    "from PIL import Image"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2fef2039",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Select GPU when available; model and batches are moved to this device.\n",
    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
    "EPOCH = 40\n",
    "BATCH_SIZE = 128\n",
    "LR = 1e-4\n",
    "\n",
    "# Force single-channel grayscale and scale pixel values to [0, 1].\n",
    "transform = transforms.Compose(\n",
    "    [\n",
    "        transforms.Grayscale(num_output_channels=1),\n",
    "        transforms.ToTensor(),\n",
    "    ]\n",
    ")\n",
    "# ImageFolder infers class labels from the sub-directory names.\n",
    "train_file = datasets.ImageFolder(\n",
    "    root=\"../USTC-TK2016/4_Png/Train\",\n",
    "    transform=transform,\n",
    ")\n",
    "test_file = datasets.ImageFolder(\n",
    "    root='../USTC-TK2016/4_Png/Test/',\n",
    "    transform=transform\n",
    ")\n",
    "\n",
    "train_loader = DataLoader(\n",
    "    train_file,\n",
    "    batch_size=BATCH_SIZE,           # images per batch (tunable)\n",
    "    shuffle=True,            # shuffle order (recommended for training)\n",
    "    num_workers=8            # data-loading worker processes (tunable)\n",
    ")\n",
    "test_loader = DataLoader(\n",
    "    test_file,\n",
    "    batch_size=BATCH_SIZE,           # images per batch (tunable)\n",
    "    shuffle=True,            # NOTE(review): shuffling the test set is unnecessary\n",
    "    num_workers=2            # data-loading worker processes (tunable)\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f382ad0a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Peek at the first training sample's image tensor (rich display).\n",
    "train_file[0][0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "841a2305",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Class names inferred from the Train/ sub-directory names.\n",
    "train_file.classes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "10a48b5b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# class_names = train_file.classes  # list of folder names\n",
    "# # 按 label 分组图像\n",
    "# samples_by_class = defaultdict(list)\n",
    "\n",
    "# for img, label in train_loader:\n",
    "#     if len(samples_by_class[label]) < 3:  # 每类最多存 3 张\n",
    "#         samples_by_class[label].append(img)\n",
    "#     if all(len(imgs) == 1 for imgs in samples_by_class.values()):\n",
    "#         if len(samples_by_class) == len(class_names):\n",
    "#             break  # 所有类别都收集到 3 张图像了，提前退出\n",
    "\n",
    "# # 画图\n",
    "# num_classes = len(class_names)\n",
    "# plt.figure(figsize=(9, num_classes * 3))\n",
    "\n",
    "# for class_idx, class_name in enumerate(class_names):\n",
    "#     images = samples_by_class[class_idx]\n",
    "#     for i in range(3):\n",
    "#         plt.subplot(num_classes, 3, class_idx * 3 + i + 1)\n",
    "        \n",
    "#         # 关键修改：调整图像形状\n",
    "#         img = images[i]\n",
    "#         if img.shape[0] == 1:  # 如果通道为1\n",
    "#             img = img.squeeze(0)  # 移除通道维度，变成 (32, 32)\n",
    "        \n",
    "#         plt.imshow(img, cmap='gray')  # 对于灰度图像，添加 cmap='gray'\n",
    "#         plt.title(f\"{class_name} #{i+1}\")\n",
    "#         plt.axis(\"off\")\n",
    "\n",
    "# plt.tight_layout()\n",
    "# plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4df9c491",
   "metadata": {},
   "outputs": [],
   "source": [
    "class TrafficCNN(nn.Module):\n",
    "    \"\"\"Small CNN classifier for 1x32x32 grayscale traffic images.\n",
    "\n",
    "    Three Conv/BatchNorm/ReLU/MaxPool blocks followed by a two-layer\n",
    "    classifier head. Input: [B, 1, 32, 32]; output: raw logits\n",
    "    [B, num_classes] (no softmax; pair with CrossEntropyLoss).\n",
    "    \"\"\"\n",
    "    def __init__(self, num_classes):\n",
    "        super(TrafficCNN, self).__init__()\n",
    "\n",
    "        self.conv_block1 = nn.Sequential(\n",
    "            nn.Conv2d(1, 32, kernel_size=3, padding=1),  # [B, 32, 32, 32]\n",
    "            nn.BatchNorm2d(32),\n",
    "            nn.ReLU(),\n",
    "            nn.MaxPool2d(2),  # [B, 32, 16, 16]\n",
    "        )\n",
    "\n",
    "        self.conv_block2 = nn.Sequential(\n",
    "            # kernel_size=4 with padding=2 grows 16x16 to 17x17 (not 16x16);\n",
    "            # the extra row/column is dropped by the floor in MaxPool2d below.\n",
    "            nn.Conv2d(32, 64, kernel_size=4, padding=2),  # [B, 64, 17, 17]\n",
    "            nn.BatchNorm2d(64),\n",
    "            nn.ReLU(),\n",
    "            nn.MaxPool2d(2),  # [B, 64, 8, 8]\n",
    "        )\n",
    "\n",
    "        self.conv_block3 = nn.Sequential(\n",
    "            nn.Conv2d(64, 128, kernel_size=3, padding=1),  # [B, 128, 8, 8]\n",
    "            nn.BatchNorm2d(128),\n",
    "            nn.ReLU(),\n",
    "            nn.MaxPool2d(2),  # [B, 128, 4, 4]\n",
    "        )\n",
    "\n",
    "        self.fc = nn.Sequential(\n",
    "            nn.Flatten(),  # [B, 128*4*4] = [B, 2048]\n",
    "            nn.Linear(128 * 4 * 4, 256),\n",
    "            nn.Sigmoid(),  # NOTE(review): ReLU is the more common hidden activation here\n",
    "            nn.Dropout(0.3),\n",
    "            nn.Linear(256, num_classes),  # logits, one per class\n",
    "        )\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"Return class logits for a batch of [B, 1, 32, 32] images.\"\"\"\n",
    "        # print(x.size())\n",
    "        x = self.conv_block1(x)\n",
    "        # print(x.size())\n",
    "        x = self.conv_block2(x)\n",
    "        # print(x.size())\n",
    "        x = self.conv_block3(x)\n",
    "        # print(x.size())\n",
    "        x = self.fc(x)\n",
    "        # print(x.size())\n",
    "        return x \n",
    "# Smoke test: assume 10 network-traffic classes.\n",
    "model = TrafficCNN(num_classes=10)\n",
    "x = torch.randn(4, 1, 32, 32)  # simulate one batch\n",
    "output = model(x)\n",
    "print(output.shape)  # torch.Size([4, 10])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ad16ac31",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_model(model, device, test_loader, criterion):\n",
    "    \"\"\"Evaluate `model` on `test_loader` and print the metrics.\n",
    "\n",
    "    Returns:\n",
    "        (avg_test_loss, test_accuracy, test_precision, test_recall),\n",
    "        where precision/recall are macro-averaged over classes.\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    total_test_loss = 0\n",
    "    all_preds = []\n",
    "    all_targets = []\n",
    "\n",
    "    with torch.no_grad():\n",
    "        for images, labels in test_loader:\n",
    "            # Bug fix: batches must be moved to the same device as the model;\n",
    "            # the original assigned them to themselves, so `device` was unused\n",
    "            # and CUDA evaluation crashed with a device mismatch.\n",
    "            images, labels = images.to(device), labels.to(device)\n",
    "            outputs = model(images)\n",
    "            loss = criterion(outputs, labels)\n",
    "            # Weight the batch loss by batch size so the average is per-sample.\n",
    "            total_test_loss += loss.item() * images.size(0)\n",
    "\n",
    "            _, preds = torch.max(outputs, 1)\n",
    "            all_preds.extend(preds.cpu().numpy())\n",
    "            all_targets.extend(labels.cpu().numpy())\n",
    "\n",
    "    avg_test_loss = total_test_loss / len(test_loader.dataset)\n",
    "    test_accuracy = accuracy_score(all_targets, all_preds)\n",
    "    test_precision = precision_score(\n",
    "        all_targets, all_preds, average=\"macro\", zero_division=0\n",
    "    )\n",
    "    test_recall = recall_score(all_targets, all_preds, average=\"macro\", zero_division=0)\n",
    "\n",
    "    print(\n",
    "        f\"Test Loss: {avg_test_loss:.4f} Acc: {test_accuracy:.4f} \"\n",
    "        f\"Prec: {test_precision:.4f} Recall: {test_recall:.4f}\"\n",
    "    )\n",
    "    return avg_test_loss, test_accuracy, test_precision, test_recall"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "921c850e",
   "metadata": {},
   "outputs": [],
   "source": [
    "def train_model(model, train_loader, test_loader, device,optimizer,criterion, num_epochs=10):\n",
    "    \"\"\"Train `model` for `num_epochs` epochs, evaluating on `test_loader`\n",
    "    after every epoch and live-plotting loss/accuracy/precision/recall.\n",
    "\n",
    "    Prints per-epoch metrics; returns nothing.\n",
    "    \"\"\"\n",
    "\n",
    "\n",
    "    # Per-epoch metric history (used for the live plots below).\n",
    "    train_loss_list, test_loss_list = [], []\n",
    "    train_acc_list, test_acc_list = [], []\n",
    "    test_precision_list, test_recall_list = [], []\n",
    "\n",
    "    for epoch in range(num_epochs):\n",
    "        model.train()\n",
    "        total_train_loss = 0\n",
    "        total_train_correct = 0\n",
    "        total_train_samples = 0\n",
    "\n",
    "        for images, labels in tqdm(train_loader):\n",
    "            images, labels = images.to(device), labels.to(device)\n",
    "\n",
    "            optimizer.zero_grad()\n",
    "            outputs = model(images)\n",
    "            loss = criterion(outputs, labels)\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "            # Weight by batch size so the epoch average is per-sample.\n",
    "            total_train_loss += loss.item() * images.size(0)\n",
    "            _, preds = torch.max(outputs, 1)\n",
    "            total_train_correct += (preds == labels).sum().item()\n",
    "            total_train_samples += labels.size(0)\n",
    "\n",
    "        avg_train_loss = total_train_loss / total_train_samples\n",
    "        train_accuracy = total_train_correct / total_train_samples\n",
    "\n",
    "        # Evaluate on the test set.\n",
    "        model.eval()\n",
    "        total_test_loss = 0\n",
    "        all_preds = []\n",
    "        all_targets = []\n",
    "\n",
    "        with torch.no_grad():\n",
    "            for images, labels in test_loader:\n",
    "                images, labels = images.to(device), labels.to(device)\n",
    "                outputs = model(images)\n",
    "                loss = criterion(outputs, labels)\n",
    "                total_test_loss += loss.item() * images.size(0)\n",
    "\n",
    "                _, preds = torch.max(outputs, 1)\n",
    "                all_preds.extend(preds.cpu().numpy())\n",
    "                all_targets.extend(labels.cpu().numpy())\n",
    "\n",
    "        avg_test_loss = total_test_loss / len(test_loader.dataset)\n",
    "        test_accuracy = accuracy_score(all_targets, all_preds)\n",
    "        test_precision = precision_score(\n",
    "            all_targets, all_preds, average=\"macro\", zero_division=0\n",
    "        )\n",
    "        test_recall = recall_score(\n",
    "            all_targets, all_preds, average=\"macro\", zero_division=0\n",
    "        )\n",
    "\n",
    "        # Record history.\n",
    "        train_loss_list.append(avg_train_loss)\n",
    "        test_loss_list.append(avg_test_loss)\n",
    "        train_acc_list.append(train_accuracy)\n",
    "        test_acc_list.append(test_accuracy)\n",
    "        test_precision_list.append(test_precision)\n",
    "        test_recall_list.append(test_recall)\n",
    "\n",
    "        print(\n",
    "            f\"Epoch {epoch+1}/{num_epochs} | \"\n",
    "            f\"Train Loss: {avg_train_loss:.4f} Acc: {train_accuracy:.4f} | \"\n",
    "            f\"Test Loss: {avg_test_loss:.4f} Acc: {test_accuracy:.4f} \"\n",
    "            f\"Prec: {test_precision:.4f} Recall: {test_recall:.4f}\"\n",
    "        )\n",
    "\n",
    "        # Live plotting: redraw the metric curves after every epoch.\n",
    "        plt.figure(figsize=(12, 6))\n",
    "\n",
    "        # Loss curves\n",
    "        plt.subplot(1, 2, 1)\n",
    "        plt.plot(train_loss_list, label=\"Train Loss\")\n",
    "        plt.plot(test_loss_list, label=\"Test Loss\")\n",
    "        plt.title(\"Loss Curve\")\n",
    "        plt.xlabel(\"Epoch\")\n",
    "        plt.ylabel(\"Loss\")\n",
    "        plt.legend()\n",
    "\n",
    "        # Accuracy / precision / recall curves\n",
    "        plt.subplot(1, 2, 2)\n",
    "        plt.plot(train_acc_list, label=\"Train Acc\")\n",
    "        plt.plot(test_acc_list, label=\"Test Acc\")\n",
    "        plt.plot(test_precision_list, label=\"Test Precision\")\n",
    "        plt.plot(test_recall_list, label=\"Test Recall\")\n",
    "        plt.title(\"Evaluation Metrics\")\n",
    "        plt.xlabel(\"Epoch\")\n",
    "        plt.ylabel(\"Score\")\n",
    "        plt.legend()\n",
    "\n",
    "        plt.tight_layout()\n",
    "        plt.pause(0.01)\n",
    "        plt.clf()  # clear the figure so the next epoch redraws in place\n",
    "\n",
    "    print(\"Training Complete.\")\n",
    "    plt.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ad54ad74",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Leftover `and False` debug toggle replaced with an explicit flag.\n",
    "# Set LOAD_PRETRAINED = True to resume from a saved checkpoint.\n",
    "LOAD_PRETRAINED = False\n",
    "\n",
    "# Always (re)build the model with the class count discovered on disk, so the\n",
    "# checkpoint branch no longer depends on a `model` leaked from an earlier cell\n",
    "# (which was built with a hard-coded 10 classes).\n",
    "model = TrafficCNN(len(train_file.classes)).to(device)\n",
    "if LOAD_PRETRAINED and os.path.exists(\"TrafficCNN.pth\"):\n",
    "    model.load_state_dict(torch.load(\"TrafficCNN.pth\", weights_only=True))\n",
    "    model.eval()\n",
    "    print(\"load\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5e1e1bca",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Standard classification setup: cross-entropy over the logits, Adam updates.\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "optimizer = optim.Adam(model.parameters(), lr=LR)\n",
    "train_model(model, train_loader, test_loader, device, optimizer, criterion, num_epochs=EPOCH)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a9782f8a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Move the model to CPU before saving so the checkpoint loads on any machine.\n",
    "cpu_state = model.cpu().state_dict()\n",
    "torch.save(cpu_state, \"TrafficCNN.pth\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cfb6d09a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Final held-out evaluation of the trained model.\n",
    "test_model(model, device, test_loader, criterion)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8c161fb3",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "from torchvision import utils\n",
    "\n",
    "# Extract and save the weights of every convolutional layer.\n",
    "def extract_weights(model):\n",
    "    \"\"\"Collect the weight tensors of all Conv2d layers in `model`.\n",
    "\n",
    "    Returns a dict mapping layer name -> detached CPU weight tensor of shape\n",
    "    [out_channels, in_channels, kernel_h, kernel_w]; each tensor is also\n",
    "    written to `<name>_weights.npy` in the working directory.\n",
    "    \"\"\"\n",
    "    weights_dict = {}\n",
    "    \n",
    "    # Walk all submodules of the model.\n",
    "    for name, module in model.named_modules():\n",
    "        # Keep only Conv2d layers.\n",
    "        if isinstance(module, nn.Conv2d):\n",
    "            # Weight tensor only (bias excluded).\n",
    "            weights = module.weight.detach().cpu()\n",
    "            weights_dict[name] = weights\n",
    "            \n",
    "            # Report the layer and its weight shape.\n",
    "            print(f\"Layer: {name}\")\n",
    "            print(f\"  Shape: {weights.shape}\")  # [out_channels, in_channels, kernel_size, kernel_size]\n",
    "            \n",
    "            # Optional: persist as a NumPy array for offline inspection.\n",
    "            np.save(f\"{name}_weights.npy\", weights.numpy())\n",
    "    \n",
    "    return weights_dict\n",
    "\n",
    "# Extract weights from the current model.\n",
    "weights_dict = extract_weights(model)\n",
    "def visualize_deeper_layer_weights(weights_dict, layer_name='conv_block2.0', out_channel_idx=0):\n",
    "    \"\"\"Visualize all input-channel kernels for one output channel of a conv layer.\"\"\"\n",
    "    if layer_name not in weights_dict:\n",
    "        print(f\"Layer {layer_name} not found!\")\n",
    "        return\n",
    "    \n",
    "    # Weights have shape [out_channels, in_channels, kernel_size, kernel_size].\n",
    "    weights = weights_dict[layer_name]\n",
    "    \n",
    "    # All input-channel kernels for the chosen output channel.\n",
    "    out_channel_weights = weights[out_channel_idx]  # [in_channels, kernel_size, kernel_size]\n",
    "    \n",
    "    # Tile the kernels into a single grid image.\n",
    "    grid = utils.make_grid(out_channel_weights.unsqueeze(1), nrow=8, normalize=True, padding=1)\n",
    "    \n",
    "    # Render the grid and save it to disk.\n",
    "    plt.figure(figsize=(10, 10))\n",
    "    plt.imshow(grid.permute(1, 2, 0))\n",
    "    plt.title(f'Weights for Output Channel {out_channel_idx}')\n",
    "    plt.axis('off')\n",
    "    plt.savefig(f'{layer_name}_channel_{out_channel_idx}_weights.png')\n",
    "    plt.show()\n",
    "\n",
    "# Visualize output channel 0 of the first conv in each block.\n",
    "visualize_deeper_layer_weights(weights_dict, 'conv_block1.0', 0)\n",
    "visualize_deeper_layer_weights(weights_dict, 'conv_block2.0', 0)\n",
    "visualize_deeper_layer_weights(weights_dict, 'conv_block3.0', 0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "12a709e3",
   "metadata": {},
   "outputs": [],
   "source": [
    "from PIL import Image\n",
    "import os\n",
    "import random\n",
    "\n",
    "# Storage for the intermediate activations captured by the hooks.\n",
    "feature_maps = []\n",
    "\n",
    "\n",
    "# Forward hook: stash each block's output as it is produced.\n",
    "def hook(module, input, output):\n",
    "    feature_maps.append(output.detach())\n",
    "\n",
    "\n",
    "# Instantiate a model.\n",
    "# NOTE(review): this builds a FRESH, randomly initialized model — the trained\n",
    "# weights from the earlier cells are not loaded here; confirm this is intended.\n",
    "num_classes = 10  # set to the actual number of classes\n",
    "model = TrafficCNN(num_classes)\n",
    "\n",
    "# Register a hook on each conv block.\n",
    "hook_handles = []\n",
    "hook_handles.append(model.conv_block1.register_forward_hook(hook))\n",
    "hook_handles.append(model.conv_block2.register_forward_hook(hook))\n",
    "hook_handles.append(model.conv_block3.register_forward_hook(hook))\n",
    "\n",
    "\n",
    "# Load and preprocess a single image.\n",
    "def load_image(image_path):\n",
    "    \"\"\"Load `image_path` as a 1x1x32x32 grayscale tensor (batch of one).\"\"\"\n",
    "    transform = transforms.Compose(\n",
    "        [\n",
    "            transforms.Resize((32, 32)),\n",
    "            transforms.Grayscale(),\n",
    "            transforms.ToTensor(),\n",
    "        ]\n",
    "    )\n",
    "    img = Image.open(image_path).convert(\"L\")\n",
    "    img_tensor = transform(img).unsqueeze(0)\n",
    "    return img_tensor\n",
    "\n",
    "\n",
    "base_folder = \"../USTC-TK2016/4_Png/Test/4\"\n",
    "images = os.listdir(base_folder)\n",
    "image_tensor = load_image(os.path.join(base_folder, random.choice(images)))\n",
    "# Forward pass (the hooks populate feature_maps as a side effect).\n",
    "with torch.no_grad():\n",
    "    model.eval()\n",
    "    output = model(image_tensor)\n",
    "\n",
    "\n",
    "# Remove the hooks so later forward passes don't keep appending.\n",
    "for handle in hook_handles:\n",
    "    handle.remove()\n",
    "\n",
    "\n",
    "# Visualize the captured feature maps.\n",
    "def visualize_feature_maps(input_pic, feature_maps, layer_names):\n",
    "    \"\"\"Show the input image, then one grid of channel activations per layer;\n",
    "    each grid is also saved as feature_maps_<layer_name>.png.\"\"\"\n",
    "    # Show the input image first.\n",
    "    plt.figure()\n",
    "    plt.suptitle(f\"Input image\", fontsize=16)\n",
    "\n",
    "    plt.imshow(input_pic, cmap=\"gray\")\n",
    "    plt.axis(\"off\")\n",
    "    plt.tight_layout()\n",
    "    plt.show()\n",
    "    for i, (feature_map, layer_name) in enumerate(zip(feature_maps, layer_names)):\n",
    "        # Ensure the tensor is on CPU for plotting.\n",
    "        feature_map = feature_map.cpu()\n",
    "\n",
    "        # Number of channels in this layer's output.\n",
    "        num_maps = feature_map.shape[1]\n",
    "        # Grid size for a roughly square layout.\n",
    "        grid_size = int(np.ceil(np.sqrt(num_maps)))\n",
    "\n",
    "        # One subplot per channel.\n",
    "        plt.figure(figsize=(grid_size, grid_size))\n",
    "        plt.suptitle(f\"Feature Maps: {layer_name}\", fontsize=16)\n",
    "\n",
    "        for idx in range(num_maps):\n",
    "            plt.subplot(grid_size, grid_size, idx + 1)\n",
    "            plt.imshow(feature_map[0, idx], cmap=\"gray\")\n",
    "            plt.axis(\"off\")\n",
    "\n",
    "        plt.tight_layout()\n",
    "        plt.savefig(f\"feature_maps_{layer_name}.png\")\n",
    "        plt.show()\n",
    "\n",
    "\n",
    "# Layer names used for plot titles and output filenames.\n",
    "layer_names = [\"conv_block1\", \"conv_block2\", \"conv_block3\"]\n",
    "\n",
    "# Visualize the input and the captured feature maps.\n",
    "visualize_feature_maps(image_tensor.squeeze(0).squeeze(0), feature_maps, layer_names)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".conda",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
