{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d45f5822",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n",
    "\n",
    "import sys\n",
    "import torch\n",
    "\n",
    "import torch\n",
    "import random\n",
    "import torch.optim as optim\n",
    "from tqdm.notebook import tqdm\n",
    "# from tqdm import tqdm\n",
    "import torch.nn as nn\n",
    "from torch.utils.data import DataLoader\n",
    "from utils.dataset_utils import CombinedDataset, SubsetDataset\n",
    "from utils.parameters import My_Params\n",
    "from synthesizers.synthesizer import Synthesizer\n",
    "from synthesizers.pattern_synthesizer import PatternSynthesizer\n",
    "from synthesizers.blend_synthesizer import BlendSynthesizer\n",
    "from synthesizers.inputaware_synthesizer import InputAwareSynthesizer\n",
    "from synthesizers.wanet_synthesizer import WaNetSynthesizer\n",
    "from synthesizers.CL_synthesizer import CLSynthesizer\n",
    "from tasks.task import Task\n",
    "from tasks.cifar10_task import Cifar10Task\n",
    "from tasks.imagenet10_task import Imagenet10Task\n",
    "from tasks.gtsrb_task import GtsrbTask\n",
    "from utils.utils import evaluate\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6722cca3",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n",
      "512*block.expansion=512\n",
      "开始生成并保存样本....\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "74cc45677a0e4439aa6feb3d0a4d3707",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/12 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "开始生成并保存样本....\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "3977b2045cdc43ebafb9ff6cb87c4af1",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/40 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "51500\n"
     ]
    }
   ],
   "source": [
    "param = My_Params()\n",
    "param.data_path = '/home/star/sda1/data/1744ecab-59b5-4839-9124-a5d97b52e660/datasets/cifar10'\n",
    "# task = Imagenet10Task(param)\n",
    "task = Cifar10Task(param)\n",
    "# synthesizer = BlendSynthesizer(task)\n",
    "synthesizer = CLSynthesizer(task)\n",
    "# synthesizer = BlendSynthesizer(task)\n",
    "# synthesizer = InputAwareSynthesizer(task, dataset='cifar10')\n",
    "# synthesizer = WaNetSynthesizer(task, dataset='cifar10')\n",
    "# synthesizer = PatternSynthesizer(task)\n",
    "\n",
    "# CL攻击才使用attack_params参数\n",
    "# synthesizer = InputAwareSynthesizer(task, dataset='cifar10')\n",
    "attack_params = {'adv':True}\n",
    "# backdoor_set = SubsetDataset(original_dataset=task.train_dataset, r=0.3, device=param.device, synthesizer=synthesizer, target=8,\n",
    "#                             only_posion_target=False, Save_sample=False, clean_label=False, attack_params=None, rm_tar=False)\n",
    "\n",
    "attack_params = {'adv':True}\n",
    "backdoor_set = SubsetDataset(original_dataset=task.train_dataset, r=0.3, device=param.device, \n",
    "                             synthesizer=synthesizer, target=8, only_posion_target=True, Save_sample=True, clean_label=True, attack_params=attack_params)\n",
    "\n",
    "bd_trainingset = SubsetDataset(original_dataset=task.train_dataset, r=1.0, device=param.device, synthesizer=synthesizer, target=8, \n",
    "                               clean_label=False, attack_params={'adv':False})\n",
    "bd_trainingset_target = SubsetDataset(original_dataset=task.train_dataset, r=1, device=param.device, \n",
    "                             synthesizer=synthesizer, target=8, only_posion_target=True, Save_sample=True, clean_label=True, attack_params={'adv':True})\n",
    "\n",
    "bd_testset = SubsetDataset(original_dataset=task.test_dataset, r=1.0, device=param.device, synthesizer=synthesizer, target=8, \n",
    "                           clean_label=False, attack_params={'adv':False})\n",
    "\n",
    "backdoor_dataset = CombinedDataset(clean_set=task.train_dataset, backdoor_set=backdoor_set, device=param.device)\n",
    "\n",
    "#非CL攻击使用如下参数\n",
    "\n",
    "\n",
    "# backdoor_set = SubsetDataset(original_dataset=task.train_dataset, r=0.1, device=param.device, synthesizer=synthesizer, target=8,\n",
    "#                             only_posion_target=False, Save_sample=False, clean_label=False, attack_params=None, rm_tar=False)\n",
    "# bd_trainingset = SubsetDataset(original_dataset=task.train_dataset, r=1.0, device=param.device, synthesizer=synthesizer, target=8,\n",
    "#                             only_posion_target=False, Save_sample=False, clean_label=False, attack_params=None, rm_tar=False)\n",
    "# bd_testset = SubsetDataset(original_dataset=task.test_dataset, r=1.0, device=param.device, synthesizer=synthesizer, target=8,\n",
    "#                             only_posion_target=False, Save_sample=False, clean_label=False, attack_params=None, rm_tar=False)\n",
    "\n",
    "# backdoor_dataset = CombinedDataset(clean_set=task.train_dataset, backdoor_set=backdoor_set, device=param.device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "852ae9bb",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "clean_trainingset_loader = DataLoader(dataset=task.test_dataset, batch_size=32, shuffle=True, num_workers=0)\n",
    "backdoor_trainingset_loader = DataLoader(dataset=bd_trainingset, batch_size=32, shuffle=True, num_workers=0)\n",
    "# backdoor_trainingset_tar_loader = DataLoader(dataset=bd_trainingset_target, batch_size=256, shuffle=True, num_workers=0)\n",
    "clean_testset_loader = DataLoader(dataset=task.test_dataset, batch_size=32, shuffle=True, num_workers=0)\n",
    "backdoor_testset_loader = DataLoader(dataset=bd_testset, batch_size=32, shuffle=True, num_workers=0)\n",
    "\n",
    "mixed_dataloader = DataLoader(dataset=backdoor_dataset, batch_size=32, shuffle=True, num_workers=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c6c040a6",
   "metadata": {},
   "outputs": [],
   "source": [
    "train_param = {\n",
    "    \"epoch\" : 50,\n",
    "    'model_lr': 1e-3,\n",
    "    'criterion' : nn.CrossEntropyLoss(),\n",
    "    'criterion_MSE' : nn.MSELoss(),\n",
    "\n",
    "}\n",
    "\n",
    "\n",
    "def train(model, dataloader, clean_testset_loader, backdoor_testset_loader, param, train_param):\n",
    "    optimizer = optim.Adam(model.parameters(), lr=train_param['model_lr'])\n",
    "    criterion = train_param['criterion']\n",
    "    model.train()\n",
    "    # 训练模型\n",
    "    num_epochs = train_param['epoch'] # 增加训练轮数\n",
    "    for epoch in range(num_epochs):\n",
    "        for data in tqdm(dataloader, desc=\"Training\"): # for data in mixed_dataloader:\n",
    "            if len(data) == 3:\n",
    "                imgs, labels, is_bd = data\n",
    "            else:\n",
    "                imgs, labels = data\n",
    "                \n",
    "            imgs, labels = imgs.to(param.device), labels.to( param.device)\n",
    "                \n",
    "            out = model(imgs)\n",
    "            loss = criterion(out, labels)\n",
    "               \n",
    "            # 反向传播\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')\n",
    "            \n",
    "        \n",
    "        avg_clean_loss, clean_acc = evaluate(model, clean_testset_loader, criterion,  param.device)\n",
    "        avg_backdoor_loss, backdoor_acc = evaluate(model, backdoor_testset_loader, criterion,  param.device)\n",
    "        print(\"Deep:\\nclean_acc:{} , backdoor_acc:{}\".format(clean_acc,backdoor_acc))\n",
    "        print(\"avg_clean_loss:{} , avg_backdoor_loss:{}\\n\".format(avg_clean_loss,avg_backdoor_loss))\n",
    "        \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5b86de16",
   "metadata": {},
   "outputs": [],
   "source": [
     "backdoor_model = task.build_model().to(param.device)\n",
     "\n",
     "# Train on the mixed (clean + poisoned) dataset.\n",
     "train(backdoor_model, mixed_dataloader, clean_testset_loader, backdoor_testset_loader, param, train_param)\n",
     "\n",
     "# Destination for the trained backdoor model.\n",
     "save_dir = './save_model/v8/'\n",
     "# Ensure the save directory exists so torch.save does not fail.\n",
     "os.makedirs(save_dir, exist_ok=True)\n",
     "\n",
     "save_filter_clean_model_path = os.path.join(save_dir, 'backdoor_model_cl.pth')\n",
     "\n",
     "# Save the model's state_dict (preferred over pickling the whole model object).\n",
     "torch.save(backdoor_model.state_dict(), save_filter_clean_model_path)\n",
     "\n",
     "print(f\"模型已成功保存到: {save_filter_clean_model_path}\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "backdoor",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
