{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "84c361ed",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/miniconda3/envs/ultralytics/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import torch\n",
    "import torchvision\n",
    "import time\n",
    "from torchvision import models\n",
    "from torch.utils.data import Dataset,DataLoader,TensorDataset\n",
    "from sklearn.datasets import fetch_openml\n",
    "import numpy as np\n",
    "from sklearn.model_selection import train_test_split\n",
    "import torch.nn as nn\n",
    "from torch.autograd.function import Function\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "from torchvision import datasets, transforms\n",
    "from  torch.utils.data import DataLoader\n",
    "import torch.optim.lr_scheduler as lr_scheduler\n",
    "import random\n",
    "from torch.utils.tensorboard import SummaryWriter\n",
    "import json\n",
    "import copy\n",
    "import cv2\n",
    "import timm\n",
    "import re\n",
    "import itertools\n",
    "from collections import defaultdict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "0b3737b4",
   "metadata": {},
   "outputs": [],
   "source": [
    "class ToTensor16bit:\n",
    "    def __call__(self, pic: np.ndarray):\n",
    "        assert pic.ndim == 2, \"只支持灰度图\"\n",
    "        tensor = torch.from_numpy(pic.astype(np.float32))  # 原始 uint16 转 float32\n",
    "        tensor = tensor.unsqueeze(0)  # [H,W] -> [1,H,W]，单通道\n",
    "        tensor = tensor / 65535.0  # 映射到 0~1\n",
    "        return tensor\n",
    "    \n",
    "# Shared preprocessing for 16-bit grayscale frames: scale to [0, 1],\n",
    "# then center with a single-channel mean/std to roughly [-1, 1].\n",
    "transforms_16bit = transforms.Compose([\n",
    "    ToTensor16bit(),\n",
    "    transforms.Normalize(mean=[0.5], std=[0.5]),\n",
    "])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "424274d8",
   "metadata": {},
   "outputs": [],
   "source": [
    "class AF9Dataset(Dataset):\n",
    "    \"\"\"Frames of 16-bit grayscale images with class and domain labels.\n",
    "\n",
    "    Raw diameter ids (15/26/35) used as domain labels are remapped to the\n",
    "    compact indices 0/1/2; values outside that set pass through unchanged.\n",
    "    \"\"\"\n",
    "\n",
    "    # Raw diameter id -> compact domain index.\n",
    "    _DOMAIN_REMAP = {15: 0, 26: 1, 35: 2}\n",
    "\n",
    "    def __init__(self, image_paths, class_labels, domain_labels):\n",
    "        self.image_paths = image_paths\n",
    "        self.labels = class_labels\n",
    "        self.domain_labels = domain_labels\n",
    "        # Same pipeline as transforms_16bit: [0, 1] scaling + grayscale normalize.\n",
    "        self.transform = transforms.Compose([\n",
    "            ToTensor16bit(),\n",
    "            transforms.Normalize(mean=[0.5], std=[0.5]),\n",
    "        ])\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.image_paths)\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        # Read the 16-bit frame as-is, resize, then normalize.\n",
    "        frame = cv2.imread(self.image_paths[idx], cv2.IMREAD_UNCHANGED).astype(np.float32)\n",
    "        frame = cv2.resize(frame, (128, 128))\n",
    "        image = self.transform(frame)\n",
    "        raw_domain = self.domain_labels[idx]\n",
    "        domain_label = self._DOMAIN_REMAP.get(raw_domain, raw_domain)\n",
    "        return image, self.labels[idx], domain_label\n",
    "\n",
    "def load_diameters_datasets(root_dir, diameters):\n",
    "    \"\"\"\n",
    "    按 类别->程度->瓶子 聚合，并仅加载指定域(直径)的数据。\n",
    "    目录命名: S{cls}_{domain}_{severity}\n",
    "    文件命名: S{cls}_{domain}_{severity}_B{bottle}_F{frame}.png\n",
    "    返回:\n",
    "        index: dict[int][int][str] -> List[str]\n",
    "               index[class_id][severity][bottle_id] = [image_path, ...]\n",
    "    \"\"\"\n",
    "    dir_pat = re.compile(r\"^S(\\d+)_([\\d]+)_(\\d+)$\")  # 目录: S2_35_2\n",
    "    file_pat = re.compile(r\"^S(\\d+)_([\\d]+)_(\\d+)_B(\\d+)_F(\\d+)\\.png$\", re.IGNORECASE)\n",
    "\n",
    "    index = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))\n",
    "\n",
    "    # 遍历子目录，筛选指定域\n",
    "    for de in os.scandir(root_dir):\n",
    "        if not de.is_dir():\n",
    "            continue\n",
    "        m = dir_pat.match(de.name)\n",
    "        if not m:\n",
    "            continue\n",
    "        cls_id, domain_id, sev_id = m.groups()\n",
    "        if domain_id != str(diameters):\n",
    "            continue  # 仅保留目标域\n",
    "\n",
    "        cls_id = int(cls_id)\n",
    "        sev_id = int(sev_id)\n",
    "\n",
    "        # 收集该目录下的所有帧\n",
    "        for fn in os.listdir(de.path):\n",
    "            fm = file_pat.match(fn)\n",
    "            if not fm:\n",
    "                continue\n",
    "            cls2, domain2, sev2, bottle_id, frame_id = fm.groups()\n",
    "            # 双重保险：再次核对域、类别、程度\n",
    "            if domain2 != str(diameters) or int(cls2) != cls_id or int(sev2) != sev_id:\n",
    "                continue\n",
    "            index[cls_id][sev_id][bottle_id].append(os.path.join(de.path, fn))\n",
    "\n",
    "    # 转回普通 dict： {class_id: {severity: {bottle_id: [image_paths...]}}}\n",
    "    return {c: {s: dict(bdict) for s, bdict in sev_map.items()} for c, sev_map in index.items()}\n",
    "\n",
    "def _priority_sample_bottles(sev_to_bottles, N, rng, priority=(2, 1)):\n",
    "    \"\"\"\n",
    "    按给定程度优先级顺序选取瓶子，直到达到 N 个或没有可选。\n",
    "    sev_to_bottles: {sev: [bottle_id, ...]}\n",
    "    priority: 优先级从高到低的程度列表，例如 (2,1)\n",
    "    返回: [(sev, bottle_id), ...]\n",
    "    \"\"\"\n",
    "    # 复制并打乱每个程度下的瓶子列表\n",
    "    work = {s: sev_to_bottles[s][:] for s in sev_to_bottles}\n",
    "    for s in work:\n",
    "        rng.shuffle(work[s])\n",
    "\n",
    "    # 构造遍历顺序：先优先级中存在的，再补其余（去重）\n",
    "    ordered_sevs = [s for s in priority if s in work]\n",
    "    ordered_sevs += [s for s in work.keys() if s not in priority]\n",
    "\n",
    "    picked = []\n",
    "    remaining = N\n",
    "    for s in ordered_sevs:\n",
    "        if remaining <= 0:\n",
    "            break\n",
    "        take = min(remaining, len(work[s]))\n",
    "        for _ in range(take):\n",
    "            picked.append((s, work[s].pop()))\n",
    "        remaining -= take\n",
    "    return picked\n",
    "\n",
    "def _round_robin_sample_bottles(sev_to_bottles, N, rng):\n",
    "    \"\"\"\n",
    "    轮转/均匀地在多个“程度”之间抽取瓶子，尽量平均且不重复。\n",
    "    当 N>=3 且可用程度包含 {1,2,3} 时，轮转顺序固定为：2,1,3,2,1,3,...\n",
    "    \"\"\"\n",
    "    # 确定轮转顺序\n",
    "    if N >= 3 and all(s in sev_to_bottles for s in (2, 1, 3)):\n",
    "        sevs = [2, 1, 3]  # 固定顺序\n",
    "    else:\n",
    "        # 保留原行为：随机打乱顺序\n",
    "        sevs = list(sev_to_bottles.keys())\n",
    "        rng.shuffle(sevs)\n",
    "\n",
    "    picked = []\n",
    "    # 将每个程度的瓶子列表复制并随机打乱\n",
    "    work = {s: sev_to_bottles[s][:] for s in sevs}\n",
    "    for s in work:\n",
    "        rng.shuffle(work[s])\n",
    "\n",
    "    # 按 sevs 轮转直到满 N 或没有可取的瓶子\n",
    "    while len(picked) < N:\n",
    "        progressed = False\n",
    "        for s in sevs:\n",
    "            if len(picked) >= N:\n",
    "                break\n",
    "            if work[s]:\n",
    "                b = work[s].pop()  # 弹出一个\n",
    "                picked.append((s, b))\n",
    "                progressed = True\n",
    "        if not progressed:\n",
    "            break  # 所有程度都没有可用瓶子了\n",
    "    return picked\n",
    "\n",
    "def n_shot_split(dataset, N_shot=None, frames_per_bottle=1, seed=1, domain_id=None):\n",
    "    \"\"\"Split one domain's index into an N-shot train split and a test split.\n",
    "\n",
    "    Args:\n",
    "        dataset: index from load_diameters_datasets,\n",
    "                 dict[class_id][severity][bottle_id] = [img_paths...]\n",
    "        N_shot: bottles per class used for training; None (or >= the class's\n",
    "                total bottle count) sends everything to the train split.\n",
    "        frames_per_bottle: frames sampled from each selected bottle.\n",
    "        seed: seed for the sampling RNG.\n",
    "        domain_id: domain string from the directory names ('15'/'26'/'35'),\n",
    "                   used to derive the domain label 0/1/2.\n",
    "\n",
    "    Returns:\n",
    "        (train_image_paths, train_class_labels, train_domain_labels,\n",
    "         test_image_paths, test_class_labels, test_domain_labels)\n",
    "    \"\"\"\n",
    "    rng = random.Random(seed)\n",
    "    if domain_id is None:\n",
    "        raise ValueError(\"n_shot_split 需要提供 domain_id（如 '15'/'26'/'35'）以生成域标签。\")\n",
    "    domain_map = {'15': 0, '26': 1, '35': 2}\n",
    "    if str(domain_id) not in domain_map:\n",
    "        raise ValueError(f\"未知的 domain_id: {domain_id}. 期望为 '15'/'26'/'35'。\")\n",
    "    d_label = domain_map[str(domain_id)]\n",
    "\n",
    "    train_image_paths, train_class_labels, train_domain_labels = [], [], []\n",
    "    test_image_paths,  test_class_labels,  test_domain_labels  = [], [], []\n",
    "\n",
    "    # Per class; dataset: {class_id: {severity: {bottle_id: [image_paths...]}}}\n",
    "    for class_id, sev_map in dataset.items():\n",
    "        # Bottles available at each severity.\n",
    "        sev_to_bottles = {sev: list(bdict.keys()) for sev, bdict in sev_map.items()}\n",
    "        total_bottles = sum(len(v) for v in sev_to_bottles.values())\n",
    "\n",
    "        if N_shot is None or N_shot >= total_bottles:\n",
    "            # Train on every frame of every bottle; nothing goes to the test split.\n",
    "            for sev, bdict in sev_map.items():\n",
    "                for bottle_id, img_list in bdict.items():\n",
    "                    for image_path in img_list:\n",
    "                        train_image_paths.append(image_path)\n",
    "                        train_class_labels.append(int(class_id))\n",
    "                        train_domain_labels.append(int(d_label))\n",
    "            continue\n",
    "\n",
    "        # N_shot is an integer: choose which bottles go to training.\n",
    "        if isinstance(N_shot, int) and N_shot < 3:\n",
    "            picked_pairs = _priority_sample_bottles(sev_to_bottles, N_shot, rng, priority=(2, 1))\n",
    "        else:\n",
    "            picked_pairs = _round_robin_sample_bottles(sev_to_bottles, N_shot, rng)\n",
    "        picked_set = {(sev, b) for sev, b in picked_pairs}\n",
    "\n",
    "        # Train split: frames_per_bottle frames from every selected bottle.\n",
    "        for sev, bottle_id in picked_pairs:\n",
    "            img_list = dataset[class_id][sev][bottle_id]\n",
    "            k = frames_per_bottle\n",
    "            if len(img_list) >= k:\n",
    "                chosen_paths = rng.sample(img_list, k)  # without replacement\n",
    "            else:\n",
    "                chosen_paths = [rng.choice(img_list) for _ in range(k)]  # too few frames: allow repeats\n",
    "                print(\"Warning: Not enough images for bottle_id:\", bottle_id)\n",
    "            for image_path in chosen_paths:\n",
    "                train_image_paths.append(image_path)\n",
    "                train_class_labels.append(int(class_id))\n",
    "                train_domain_labels.append(int(d_label))\n",
    "\n",
    "        # Test split: every frame of each bottle that was not selected.\n",
    "        for sev, bdict in sev_map.items():\n",
    "            for bottle_id, img_list in bdict.items():\n",
    "                if (sev, bottle_id) in picked_set:\n",
    "                    continue\n",
    "                for image_path in img_list:\n",
    "                    test_image_paths.append(image_path)\n",
    "                    test_class_labels.append(int(class_id))\n",
    "                    test_domain_labels.append(int(d_label))\n",
    "\n",
    "    return (train_image_paths, train_class_labels, train_domain_labels,\n",
    "            test_image_paths,  test_class_labels,  test_domain_labels)\n",
    "\n",
    "def al9_domain_dataloader(datasets_root_dir, src_domain, src_n_shot, tar_domain, tar_n_shot, batch_size, seed):\n",
    "    \"\"\"Build the source-train, target-train and target-test DataLoaders.\n",
    "\n",
    "    The two train loaders each use batch_size // 2 so one concatenated\n",
    "    training step sees batch_size samples in total.\n",
    "    \"\"\"\n",
    "    src_index = load_diameters_datasets(datasets_root_dir, src_domain)\n",
    "    tar_index = load_diameters_datasets(datasets_root_dir, tar_domain)\n",
    "\n",
    "    # Source domain: only its training split is needed.\n",
    "    (src_train_image_paths, src_train_class_labels, src_train_domain_labels,\n",
    "     _, _, _) = n_shot_split(src_index, src_n_shot, frames_per_bottle=3, seed=seed, domain_id=src_domain)\n",
    "\n",
    "    # Target domain: few-shot training split plus the held-out test split.\n",
    "    (tar_train_image_paths, tar_train_class_labels, tar_train_domain_labels,\n",
    "     tar_test_image_paths, tar_test_class_labels, tar_test_domain_labels) = n_shot_split(\n",
    "        tar_index, tar_n_shot, frames_per_bottle=3, seed=seed, domain_id=tar_domain)\n",
    "\n",
    "    def _make_loader(paths, class_labels, domain_labels, bs):\n",
    "        # A freshly seeded generator keeps shuffling reproducible across runs.\n",
    "        return DataLoader(AF9Dataset(paths, class_labels, domain_labels), batch_size=bs,\n",
    "                          shuffle=True, generator=torch.Generator().manual_seed(seed))\n",
    "\n",
    "    src_train_loader = _make_loader(src_train_image_paths, src_train_class_labels, src_train_domain_labels, batch_size // 2)\n",
    "    tar_train_loader = _make_loader(tar_train_image_paths, tar_train_class_labels, tar_train_domain_labels, batch_size // 2)\n",
    "    tar_test_loader = _make_loader(tar_test_image_paths, tar_test_class_labels, tar_test_domain_labels, batch_size)\n",
    "\n",
    "    return src_train_loader, tar_train_loader, tar_test_loader\n",
    "\n",
    "# Example usage:\n",
    "# if __name__ == \"__main__\":\n",
    "#     src_train_loader, tar_train_loader, tar_test_loader = al9_domain_dataloader(\n",
    "#         'your_dataset_root_path', '35', None, '15', 3, 32, 42)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "5ca5a1d2",
   "metadata": {},
   "outputs": [],
   "source": [
    "class DivAugModel(nn.Module):\n",
    "    \"\"\"Two-branch disentanglement model: a class branch (Ec + Cc) and a\n",
    "    domain branch (Ed + Cd).\n",
    "\n",
    "    Each branch's classifier consumes the concatenation of BOTH encoders'\n",
    "    features, but detach() in forward() stops gradients from crossing into\n",
    "    the other branch's encoder.  NOTE(review): the Linear(2048, ...) heads\n",
    "    assume each encoder outputs 1024-dim pooled features -- confirm against\n",
    "    the installed timm version of mobilenetv3_small_100.\n",
    "    \"\"\"\n",
    "    def __init__(self, num_classes=9, num_domains=3, pretrained=True, inchans=1):\n",
    "        super().__init__()\n",
    "        # Class-feature and domain-feature encoders (headless backbones).\n",
    "        self.Ec = timm.create_model('timm/mobilenetv3_small_100.lamb_in1k', pretrained=pretrained, in_chans=inchans, num_classes=0)\n",
    "        self.Ed = timm.create_model('timm/mobilenetv3_small_100.lamb_in1k', pretrained=pretrained, in_chans=inchans, num_classes=0)\n",
    "        # Class classifier over the concatenated features.\n",
    "        self.Cc = nn.Sequential(\n",
    "            nn.Linear(2048, 1024),\n",
    "            nn.Linear(1024, 128),\n",
    "            nn.Linear(128, num_classes),\n",
    "        )\n",
    "        # Domain classifier over the concatenated features.\n",
    "        self.Cd = nn.Sequential(\n",
    "            nn.Linear(2048, 1024),\n",
    "            nn.Linear(1024, 128),\n",
    "            nn.Linear(128, num_domains)\n",
    "        )\n",
    "\n",
    "    def forward_features(self, x1, x2):\n",
    "        # Encode the inputs with each branch's backbone.\n",
    "        f_c = self.Ec(x1)\n",
    "        f_d = self.Ed(x2)\n",
    "        return f_c, f_d\n",
    "\n",
    "    def feature_concat(self, f_c, f_d):\n",
    "        # Concatenate class and domain features along the feature axis.\n",
    "        f_concat = torch.cat([f_c, f_d], dim=1)\n",
    "        return f_concat\n",
    "    \n",
    "    def class_classify(self, f_concat):\n",
    "        pre_c = self.Cc(f_concat)\n",
    "        return pre_c\n",
    "    \n",
    "    def domain_classify(self, f_concat):\n",
    "        pre_d = self.Cd(f_concat)\n",
    "        return pre_d\n",
    "    \n",
    "    def forward(self, x1, x2, phase):\n",
    "        \"\"\"Return (class_logits, domain_logits_or_None, f_c, f_d).\n",
    "\n",
    "        phase == 1 also produces domain logits; the detach() calls ensure the\n",
    "        class loss cannot update Ed and the domain loss cannot update Ec.\n",
    "        \"\"\"\n",
    "        f_c, f_d = self.forward_features(x1, x2)\n",
    "        f_concat = self.feature_concat(f_c, f_d.detach())\n",
    "        pre_c = self.class_classify(f_concat)\n",
    "        if phase == 1:\n",
    "            f_concat = self.feature_concat(f_c.detach(), f_d)\n",
    "            pre_d = self.domain_classify(f_concat)\n",
    "        else:\n",
    "            pre_d = None\n",
    "        return pre_c, pre_d, f_c, f_d\n",
    "\n",
    "# if __name__ == '__main__':\n",
    "#     model = DivAugModel()\n",
    "#     image = torch.rand(128, 128)\n",
    "#     image = image.unsqueeze(0).unsqueeze(0)\n",
    "#     f_c, f_d = model.forward_features(image, image)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "0f613565",
   "metadata": {},
   "outputs": [],
   "source": [
    "def semantic_inconsistency_loss(c_feats, d_feats, tau=0.3):\n",
    "    B, _ = c_feats.shape\n",
    "    device = c_feats.device\n",
    "    if B < 2:\n",
    "        raise ValueError(\"Batch size must be at least 2.\")\n",
    "    # 计算相似度矩阵\n",
    "    c_norm = F.normalize(c_feats, p=2, dim=1)\n",
    "    d_norm = F.normalize(d_feats, p=2, dim=1)\n",
    "    sim_matrix = torch.matmul(c_norm, d_norm.T) / tau\n",
    "    # 选取每行的正样本\n",
    "    row_indices = torch.arange(B, device=device).unsqueeze(1).expand(B, B-1) \n",
    "    col_candidates = torch.zeros(B, B-1, dtype=torch.long, device=device)\n",
    "    for i in range(B):\n",
    "        candidates = torch.cat([torch.arange(i, device=device), \n",
    "                               torch.arange(i+1, B, device=device)])\n",
    "        col_candidates[i] = candidates\n",
    "    random_indices = torch.randint(0, B-1, (B,), device=device) \n",
    "    positive_cols = col_candidates[torch.arange(B), random_indices] \n",
    "    positive_sims = sim_matrix[torch.arange(B, device=device), positive_cols] \n",
    "    # 负样本, 每行除了正样本的其他元素\n",
    "    mask = torch.ones(B, B, device=device, dtype=torch.bool)\n",
    "    mask[torch.arange(B, device=device), positive_cols] = False\n",
    "    negative_sims = sim_matrix[mask].view(B, B-1)  \n",
    "    # 对比损失, all_logits每行的第1个元素是正样本, softmax + BCE\n",
    "    all_logits = torch.cat([positive_sims.unsqueeze(1), negative_sims], dim=1)  \n",
    "    labels = torch.zeros(B, dtype=torch.long, device=device) \n",
    "    loss = F.cross_entropy(all_logits, labels)\n",
    "    return loss\n",
    "\n",
    "def Loss_s(c_feats, d_feats, tau=0.3):\n",
    "    \"\"\"Symmetrized semantic-inconsistency loss (c->d plus d->c).\"\"\"\n",
    "    forward_term = semantic_inconsistency_loss(c_feats, d_feats, tau)\n",
    "    backward_term = semantic_inconsistency_loss(d_feats, c_feats, tau)\n",
    "    return forward_term + backward_term\n",
    "\n",
    "# if __name__ == '__main__':\n",
    "#     # 测试Loss_s函数\n",
    "#     device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "    \n",
    "#     # 测试用例1：完全正交的特征\n",
    "#     print(\"=== 测试1：正交特征 ===\")\n",
    "#     c_feats_orth = torch.tensor([[1.0, 0.0], [0.0, 1.0]], device=device)\n",
    "#     d_feats_orth = torch.tensor([[0.0, 1.0], [1.0, 0.0]], device=device)\n",
    "#     loss_orth = Loss_s(c_feats_orth, d_feats_orth)\n",
    "#     print(f\"正交特征Loss_s: {loss_orth.item():.4f}\")\n",
    "    \n",
    "#     # 测试用例2：完全相同的特征\n",
    "#     print(\"\\n=== 测试2：相同特征 ===\")\n",
    "#     c_feats_same = torch.tensor([[1.0, 0.0], [0.0, 1.0]], device=device)\n",
    "#     d_feats_same = torch.tensor([[1.0, 0.0], [0.0, 1.0]], device=device)\n",
    "#     loss_same = Loss_s(c_feats_same, d_feats_same)\n",
    "#     print(f\"相同特征Loss_s: {loss_same.item():.4f}\")\n",
    "    \n",
    "#     # 测试用例3：半正交特征\n",
    "#     print(\"\\n=== 测试3：半正交特征 ===\")\n",
    "#     c_feats_semi = torch.tensor([[1.0, 0.0], [0.0, 1.0]], device=device)\n",
    "#     d_feats_semi = torch.tensor([[0.707, 0.707], [-0.707, 0.707]], device=device)  # 45度旋转\n",
    "#     loss_semi = Loss_s(c_feats_semi, d_feats_semi)\n",
    "#     print(f\"半正交特征Loss_s: {loss_semi.item():.4f}\")\n",
    "    \n",
    "#     # 测试用例4：随机特征\n",
    "#     print(\"\\n=== 测试4：随机特征 ===\")\n",
    "#     torch.manual_seed(42)\n",
    "#     c_feats_rand = torch.randn(4, 8, device=device)\n",
    "#     d_feats_rand = torch.randn(4, 8, device=device)\n",
    "#     loss_rand = Loss_s(c_feats_rand, d_feats_rand)\n",
    "#     print(f\"随机特征Loss_s: {loss_rand.item():.4f}\")\n",
    "    \n",
    "#     print(\"\\n=== 预期结果 ===\")\n",
    "#     print(\"语义不一致损失应该：\")\n",
    "#     print(\"- 相同特征时最小（接近0）\")\n",
    "#     print(\"- 正交特征时较大\") \n",
    "#     print(\"- 半正交特征时中等\")\n",
    "#     print(\"- 随机特征时变化较大\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "8e563212",
   "metadata": {},
   "outputs": [],
   "source": [
    "def train_and_evaluation(datasets_root_dir, output_dir, \n",
    "          src_domain, src_n_shot, tar_domain, tar_n_shot, seed, \n",
    "          learning_rate=1e-3, momentum=0.9, weight_decay=5e-4, \n",
    "          num_epochs=100, batch_size=32, tau=0.3, lamda=3.0, N_t=10):\n",
    "    \"\"\"Train DivAug on mixed source+target batches and evaluate on the target test set.\n",
    "\n",
    "    Phase 1 (epoch < N_t) trains both the class branch (Ec/Cc) and the domain\n",
    "    branch (Ed/Cd) together with the semantic-inconsistency loss; afterwards\n",
    "    the domain branch is frozen and the class branch additionally receives an\n",
    "    augmentation loss built from mixed source-class / target-domain features.\n",
    "\n",
    "    Args:\n",
    "        datasets_root_dir: root folder holding the AF9 image directories.\n",
    "        output_dir: TensorBoard log dir; the best checkpoint is saved here too.\n",
    "        src_domain, tar_domain: domain ids such as '15'/'26'/'35'.\n",
    "        src_n_shot, tar_n_shot: bottles per class for each domain (None = all).\n",
    "        seed: RNG seed for the loaders and the feature-mixing permutations.\n",
    "        momentum, weight_decay: only used by the commented-out SGD variant.\n",
    "        tau: temperature of the semantic-inconsistency loss.\n",
    "        lamda: weight of the augmentation loss in phase 2.\n",
    "        N_t: number of phase-1 epochs.\n",
    "\n",
    "    Returns:\n",
    "        (last_epoch_test_acc, best_test_acc) as Python floats.\n",
    "    \"\"\"\n",
    "    \n",
    "    writer = SummaryWriter(log_dir=f'{output_dir}')\n",
    "    DEVICE = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
    "    \n",
    "    src_train_loader, tar_train_loader, tar_test_loader = al9_domain_dataloader(datasets_root_dir, src_domain, src_n_shot, tar_domain, tar_n_shot, batch_size, seed)\n",
    "    # Cycle the (small) target loader so it can be paired with every source batch.\n",
    "    tar_train_cycle_loader = itertools.cycle(tar_train_loader)\n",
    "    model = DivAugModel(num_classes=9, num_domains=3, pretrained=True, inchans=1).to(DEVICE)\n",
    "    \n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    \n",
    "    # optimizer_c = torch.optim.SGD(\n",
    "    #     list(model.Ec.parameters()) + list(model.Cc.parameters()), \n",
    "    #     lr=learning_rate, momentum=momentum, weight_decay=weight_decay\n",
    "    # )\n",
    "    # optimizer_d = torch.optim.SGD(\n",
    "    #     list(model.Ed.parameters()) + list(model.Cd.parameters()), \n",
    "    #     lr=learning_rate, momentum=momentum, weight_decay=weight_decay\n",
    "    # )\n",
    "    # scheduler_c = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer_c, T_max=num_epochs, eta_min=0)\n",
    "    # scheduler_d = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer_d, T_max=num_epochs, eta_min=0)\n",
    "    # One optimizer per branch so the two losses update disjoint parameters.\n",
    "    optimizer_c = torch.optim.Adam(\n",
    "        list(model.Ec.parameters()) + list(model.Cc.parameters()), \n",
    "        lr=learning_rate)\n",
    "    optimizer_d = torch.optim.Adam(\n",
    "        list(model.Ed.parameters()) + list(model.Cd.parameters()), \n",
    "        lr=learning_rate)\n",
    "    scheduler_c = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer_c, T_max=num_epochs, eta_min=0)\n",
    "    scheduler_d = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer_d, T_max=num_epochs, eta_min=0)\n",
    "    # scheduler_c = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer_c, patience=3, factor=0.5, min_lr=1e-6)\n",
    "    # scheduler_d = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer_d, patience=3, factor=0.5, min_lr=1e-6)\n",
    "    \n",
    "    best_acc_c = 0.0\n",
    "    for epoch in range(num_epochs):\n",
    "        model.train()\n",
    "        total_train_samples = 0  # total number of samples seen this epoch\n",
    "        train_correct_predict_c = 0\n",
    "        train_acc_c = 0.0\n",
    "        total_loss_c = 0.0\n",
    "        total_loss_d = 0.0\n",
    "        total_loss_s = 0.0\n",
    "        total_loss_aug = 0.0\n",
    "        Ed_Cd_freeze_flag = False\n",
    "        # Each training step pairs one source batch with one (cycled) target batch.\n",
    "        for src_images, src_labels, src_domain in src_train_loader:\n",
    "            tar_images, tar_labels, tar_domain = next(tar_train_cycle_loader)\n",
    "            images = torch.cat([src_images, tar_images], dim=0).to(DEVICE)\n",
    "            labels = torch.cat([src_labels, tar_labels], dim=0).to(DEVICE)\n",
    "            domain = torch.cat([src_domain, tar_domain], dim=0).to(DEVICE)\n",
    "            if epoch < N_t:\n",
    "                # Phase 1: train both branches; the detach() calls inside the\n",
    "                # model keep each loss from updating the other branch's encoder.\n",
    "                phase = 1\n",
    "                pre_c, pre_d, f_c, f_d = model(images, images, phase)\n",
    "                \n",
    "                optimizer_c.zero_grad()\n",
    "                loss_c = criterion(pre_c, labels)\n",
    "                loss_s1 = Loss_s(f_c, f_d.detach(), tau)\n",
    "                loss_c_s = loss_c + loss_s1\n",
    "                loss_c_s.backward()\n",
    "                optimizer_c.step()\n",
    "                # scheduler_c.step()\n",
    "                \n",
    "                optimizer_d.zero_grad()\n",
    "                loss_d = criterion(pre_d, domain)\n",
    "                loss_s2 = Loss_s(f_c.detach(), f_d, tau)\n",
    "                loss_d_s = loss_d + loss_s2\n",
    "                loss_d_s.backward()\n",
    "                optimizer_d.step()\n",
    "                # scheduler_d.step()\n",
    "            else:\n",
    "                # Phase 2: freeze the domain branch once, then train only Ec/Cc.\n",
    "                phase = 2\n",
    "                if Ed_Cd_freeze_flag == False:\n",
    "                    for param in model.Ed.parameters():\n",
    "                        param.requires_grad = False\n",
    "                    for param in model.Cd.parameters():\n",
    "                        param.requires_grad = False\n",
    "                    Ed_Cd_freeze_flag = True\n",
    "                pre_c, _, c_feats, d_feats = model(images, images, phase)\n",
    "                \n",
    "                optimizer_c.zero_grad()\n",
    "                loss_c = criterion(pre_c, labels)\n",
    "                loss_s1 = Loss_s(c_feats, d_feats.detach(), tau)\n",
    "                \n",
    "                # Feature augmentation: pair source class features with target\n",
    "                # domain features while keeping the source class labels.\n",
    "                src_batch = src_images.size(0)\n",
    "                tar_batch = tar_images.size(0)\n",
    "                N = min(src_batch, tar_batch)\n",
    "                torch.manual_seed(epoch * 1000 + seed)\n",
    "                \n",
    "                src_indices = torch.randperm(src_batch, device=DEVICE)[:N]\n",
    "                src_c_feats = c_feats[src_indices]\n",
    "                \n",
    "                # Target samples sit after the source ones in the concatenated batch.\n",
    "                tar_indices = torch.randperm(tar_batch, device=DEVICE)[:N] + src_batch\n",
    "                tar_d_feats = d_feats[tar_indices]\n",
    "                \n",
    "                f_aug = torch.cat([src_c_feats, tar_d_feats], dim=1)\n",
    "                f_aug_labels = labels[src_indices]\n",
    "                \n",
    "                pre_c_aug = model.class_classify(f_aug)\n",
    "                loss_aug = criterion(pre_c_aug, f_aug_labels)\n",
    "                \n",
    "                loss_c_s_aug = loss_c + loss_s1 + lamda * loss_aug\n",
    "                loss_c_s_aug.backward()\n",
    "                optimizer_c.step()\n",
    "                # scheduler_c.step()\n",
    "                \n",
    "            # NOTE(review): losses are weighted by src_images.size(0) although\n",
    "            # each batch also contains target samples -- confirm this is intended.\n",
    "            total_loss_c += loss_c.item() * src_images.size(0)\n",
    "            if phase == 1: \n",
    "                total_loss_d += loss_d.item() * src_images.size(0)\n",
    "                total_loss_s += (loss_s1.item() + loss_s2.item())/2 * src_images.size(0)\n",
    "            else: \n",
    "                total_loss_aug += loss_aug.item() * src_images.size(0)\n",
    "                total_loss_s += loss_s1.item() * src_images.size(0)\n",
    "                \n",
    "            predict_c = torch.max(pre_c, 1)[1]\n",
    "            train_correct_predict_c += torch.sum(predict_c == labels.data)\n",
    "            total_train_samples += labels.size(0)\n",
    "\n",
    "        epoch_loss_c = total_loss_c / len(src_train_loader.dataset)\n",
    "        writer.add_scalar('Loss/train_loss_c', epoch_loss_c, epoch)\n",
    "        epoch_loss_s = total_loss_s / len(src_train_loader.dataset)\n",
    "        writer.add_scalar('Loss/train_loss_s', epoch_loss_s, epoch)\n",
    "        # Step the per-epoch schedulers; which ones exist depends on the phase.\n",
    "        if phase == 1:\n",
    "            scheduler_c.step()\n",
    "            scheduler_d.step()\n",
    "            epoch_loss_d = total_loss_d / len(src_train_loader.dataset)\n",
    "            writer.add_scalar('Loss/train_loss_d', epoch_loss_d, epoch)\n",
    "        else:\n",
    "            scheduler_c.step()\n",
    "            epoch_loss_aug = total_loss_aug / len(src_train_loader.dataset)\n",
    "            writer.add_scalar('Loss/train_loss_aug', epoch_loss_aug, epoch)\n",
    "        train_acc_c = train_correct_predict_c.double() / total_train_samples\n",
    "        writer.add_scalar('Acc/train_acc_c', train_acc_c, epoch)\n",
    "\n",
    "        # Evaluation on the target test split.  NOTE(review): `phase` leaks\n",
    "        # from the last training iteration and selects the eval branch below.\n",
    "        model.eval()\n",
    "        test_loss_c = 0.0\n",
    "        test_loss_d = 0.0\n",
    "        test_correct_predict_c = 0\n",
    "        test_correct_predict_d = 0\n",
    "        for tar_images, tar_labels, tar_domain in tar_test_loader:\n",
    "            with torch.no_grad():\n",
    "                tar_images = tar_images.to(DEVICE)\n",
    "                tar_labels = tar_labels.to(DEVICE)\n",
    "                tar_domain = tar_domain.to(DEVICE)\n",
    "                pre_c, pre_d, f_c, f_d = model(tar_images, tar_images, phase)\n",
    "                \n",
    "                test_loss_c += criterion(pre_c, tar_labels)\n",
    "                predict_c = torch.max(pre_c, 1)[1]\n",
    "                test_correct_predict_c += torch.sum(predict_c == tar_labels.data)\n",
    "                if phase == 1:\n",
    "                    test_loss_d += criterion(pre_d, tar_domain.to(DEVICE))\n",
    "                    predict_d = torch.max(pre_d, 1)[1]\n",
    "                    test_correct_predict_d += torch.sum(predict_d == tar_domain.data)\n",
    "        \n",
    "        test_loss_c = test_loss_c / len(tar_test_loader)\n",
    "        # scheduler_c.step(test_loss_c)\n",
    "        writer.add_scalar('Loss/test_loss_c', test_loss_c, epoch)\n",
    "        if phase == 1:\n",
    "            test_loss_d = test_loss_d / len(tar_test_loader)\n",
    "            # scheduler_d.step(test_loss_d)\n",
    "            writer.add_scalar('Loss/test_loss_d', test_loss_d, epoch)\n",
    "        test_acc_c = test_correct_predict_c.double() / len(tar_test_loader.dataset)            \n",
    "        # Checkpoint whenever target accuracy improves.\n",
    "        if test_acc_c > best_acc_c:\n",
    "            best_acc_c = test_acc_c\n",
    "            torch.save(model.state_dict(), os.path.join(output_dir, 'best_model.pth'))\n",
    "        writer.add_scalar('Acc/test_acc_c', test_acc_c, epoch)\n",
    "        writer.add_scalar('Acc/test_best_acc_c', best_acc_c, epoch)     \n",
    "    writer.close()\n",
    "    return test_acc_c.cpu().item(), best_acc_c.cpu().item()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "0124c4f9",
   "metadata": {},
   "outputs": [],
   "source": [
    "def compute_mean_std_acc(acc_dict, save_path):\n",
    "    # 备份原始 acc_dict\n",
    "    backup_acc_dict = copy.deepcopy(acc_dict)\n",
    "\n",
    "    # 存储均值和标准差\n",
    "    stats = {}\n",
    "    for key, value in acc_dict.items():\n",
    "        domain_pair = \"_\".join(key.split(\"_\")[:3])  # e.g. 15_to_26\n",
    "        if domain_pair not in stats:\n",
    "            stats[domain_pair] = []\n",
    "        stats[domain_pair].append(value)\n",
    "\n",
    "    mean_std_results = {}\n",
    "    all_means = []\n",
    "    for domain_pair, values in stats.items():\n",
    "        mean = np.mean(values)\n",
    "        std = np.std(values)\n",
    "        # 保存为 mean±std 形式（保留小数位可自行调整，比如:.4f）\n",
    "        mean_std_results[domain_pair] = f\"{mean*100:.2f}±{std*100:.1f}\"\n",
    "        all_means.append(mean)\n",
    "    \n",
    "    # 计算宏平均\n",
    "    macro_results = {\n",
    "        \"macro_mean\": f\"{np.mean(all_means)*100:.2f}\",\n",
    "        \"macro_std\": f\"{np.std(all_means)*100:.1f}\"\n",
    "    }\n",
    "    \n",
    "    # 最终保存的内容\n",
    "    output = {\n",
    "        \"per_fold_acc\": backup_acc_dict,\n",
    "        \"5fold_mean_std_acc\": mean_std_results,\n",
    "        \"macro_acc\": macro_results\n",
    "    }\n",
    "\n",
    "    # 保存到 JSON 文件\n",
    "    with open(save_path, \"w\", encoding=\"utf-8\") as f:\n",
    "        json.dump(output, f, indent=4, ensure_ascii=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "6973d610",
   "metadata": {},
   "outputs": [],
   "source": [
    "def seed_everything(seed):\n",
    "    random.seed(seed)\n",
    "    np.random.seed(seed)\n",
    "    torch.manual_seed(seed)\n",
    "    torch.cuda.manual_seed_all(seed)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5edbff5d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Driver: 5-fold cross-domain experiments for every (source, target) pair at 30-shot source / 1-shot target.\n",
    "# NOTE(review): relies on train_and_evaluation(), seed_everything() and compute_mean_std_acc() from earlier cells.\n",
    "root_data = 'your_dataset_root_path'  # placeholder -- set to the real dataset root before running\n",
    "base_model = 'mobilenetv3_small_100'  #'VGG16'  #'mobilenetv3_small_100'\n",
    "src_N_shot=30  # labeled samples per class from the source domain\n",
    "tar_N_shot=1  # labeled samples per class from the target domain\n",
    "learning_rate=1e-4\n",
    "n_epoch = 100\n",
    "n_class = 9\n",
    "N_t = 10  # presumably a sampling/augmentation count forwarded to train_and_evaluation -- confirm against its definition\n",
    "# Each entry is [source_domain, target_domain]; '15'/'26'/'35' name the operating conditions.\n",
    "source_target_domain = [['15','26'], ['15','35'], ['26','35'], ['26','15'], ['35','15'], ['35','26']]\n",
    "# source_target_domain = [['15','35'], ['26','15'], ['26','35'], ['35','15'], ['35','26']]\n",
    "root_output = f'./AF9-DivAug/{src_N_shot}-{tar_N_shot}-shot'\n",
    "best_acc_dict = {}  # best-epoch accuracy per (pair, fold), keyed '{src}_to_{tgt}_fold{k}'\n",
    "last_acc_dict = {}  # final-epoch accuracy per (pair, fold), same key scheme\n",
    "for source_domain, target_domain in source_target_domain:\n",
    "    for fold_id in range(5):  # 5 folds; the fold index doubles as the random seed\n",
    "        output_dir = f'{root_output}/{source_domain}_to_{target_domain}/fold_{fold_id}'\n",
    "        random_seed = fold_id\n",
    "        seed_everything(random_seed)\n",
    "        last_acc, best_acc = train_and_evaluation(root_data, output_dir, \n",
    "                                                source_domain, src_N_shot, target_domain, tar_N_shot, random_seed, \n",
    "                                                learning_rate=learning_rate, momentum=0.9, weight_decay=5e-4, \n",
    "                                                num_epochs=n_epoch, batch_size=32, tau=0.3, lamda=3.0, N_t=N_t)\n",
    "        last_acc_dict[f'{source_domain}_to_{target_domain}_fold{fold_id}'] = last_acc\n",
    "        best_acc_dict[f'{source_domain}_to_{target_domain}_fold{fold_id}'] = best_acc\n",
    "        # Re-write the aggregate JSONs after every fold so partial results survive an interruption.\n",
    "        compute_mean_std_acc(last_acc_dict, f'{root_output}/last_acc_all_results.json')\n",
    "        compute_mean_std_acc(best_acc_dict, f'{root_output}/best_acc_all_results.json')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a8f8f643",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Driver: 5-fold cross-domain experiments at 30-shot source / 3-shot target.\n",
    "# FIXME(review): copy-paste of the previous driver cell differing only in tar_N_shot --\n",
    "# consider wrapping the sweep in a function taking tar_N_shot as a parameter.\n",
    "root_data = 'your_dataset_root_path'  # placeholder -- set to the real dataset root before running\n",
    "base_model = 'mobilenetv3_small_100'  #'VGG16'  #'mobilenetv3_small_100'\n",
    "src_N_shot=30  # labeled samples per class from the source domain\n",
    "tar_N_shot=3  # labeled samples per class from the target domain\n",
    "learning_rate=1e-4\n",
    "n_epoch = 100\n",
    "n_class = 9\n",
    "N_t = 10  # presumably a sampling/augmentation count forwarded to train_and_evaluation -- confirm against its definition\n",
    "# Each entry is [source_domain, target_domain]; '15'/'26'/'35' name the operating conditions.\n",
    "source_target_domain = [['15','26'], ['15','35'], ['26','35'], ['26','15'], ['35','15'], ['35','26']]\n",
    "# source_target_domain = [['15','35'], ['26','15'], ['26','35'], ['35','15'], ['35','26']]\n",
    "root_output = f'./AF9-DivAug/{src_N_shot}-{tar_N_shot}-shot'\n",
    "best_acc_dict = {}  # best-epoch accuracy per (pair, fold), keyed '{src}_to_{tgt}_fold{k}'\n",
    "last_acc_dict = {}  # final-epoch accuracy per (pair, fold), same key scheme\n",
    "for source_domain, target_domain in source_target_domain:\n",
    "    for fold_id in range(5):  # 5 folds; the fold index doubles as the random seed\n",
    "        output_dir = f'{root_output}/{source_domain}_to_{target_domain}/fold_{fold_id}'\n",
    "        random_seed = fold_id\n",
    "        seed_everything(random_seed)\n",
    "        last_acc, best_acc = train_and_evaluation(root_data, output_dir, \n",
    "                                                source_domain, src_N_shot, target_domain, tar_N_shot, random_seed, \n",
    "                                                learning_rate=learning_rate, momentum=0.9, weight_decay=5e-4, \n",
    "                                                num_epochs=n_epoch, batch_size=32, tau=0.3, lamda=3.0, N_t=N_t)\n",
    "        last_acc_dict[f'{source_domain}_to_{target_domain}_fold{fold_id}'] = last_acc\n",
    "        best_acc_dict[f'{source_domain}_to_{target_domain}_fold{fold_id}'] = best_acc\n",
    "        # Re-write the aggregate JSONs after every fold so partial results survive an interruption.\n",
    "        compute_mean_std_acc(last_acc_dict, f'{root_output}/last_acc_all_results.json')\n",
    "        compute_mean_std_acc(best_acc_dict, f'{root_output}/best_acc_all_results.json')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "ultralytics",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.17"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
