{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import paddle\n",
    "import time\n",
    "import paddle.nn as nn\n",
    "import paddle.nn.functional as F\n",
    "import paddle.vision.transforms as transforms\n",
    "from paddle.io import DataLoader\n",
    "import numpy as np\n",
    "import paddle.optimizer.lr as lrScheduler\n",
    "from paddle.vision.transforms import BaseTransform\n",
    "import matplotlib.pyplot as plt\n",
    "import math\n",
    "from tqdm import tqdm\n",
    "import os\n",
    "\n",
    "# Fix seeds so paddle ops and the numpy-driven augmentation choices are reproducible.\n",
    "paddle.seed(1024)\n",
    "np.random.seed(1234)\n",
    "\n",
    "print(paddle.__version__)\n",
    "\n",
    "class AutoTransforms(BaseTransform):\n",
    "    # Applies exactly ONE transform, picked uniformly at random from `transforms`,\n",
    "    # per image; acts as the identity when `transforms` is None.\n",
    "    def __init__(self, transforms=None, keys=None):\n",
    "        super(AutoTransforms, self).__init__(keys)\n",
    "        self.transforms = transforms\n",
    "\n",
    "    def _apply_image(self, image):\n",
    "        if self.transforms is None: \n",
    "            return image\n",
    "        choose=np.random.randint(0, len(self.transforms))\n",
    "        return self.transforms[choose](image)\n",
    "\n",
    "# Data augmentation for the training set\n",
    "# CIFAR-100 per-channel RGB mean/std used for normalization below.\n",
    "mean = [0.5071, 0.4867, 0.4408]\n",
    "std = [0.2675, 0.2565, 0.2761]\n",
    "\n",
    "# Candidate augmentations; AutoTransforms applies one of these at random.\n",
    "transforms_list= [\n",
    "    transforms.BrightnessTransform(0.5),\n",
    "    transforms.SaturationTransform(0.5),\n",
    "    transforms.ContrastTransform(0.5),\n",
    "    transforms.HueTransform(0.5),\n",
    "    transforms.RandomRotation(15,\n",
    "                              expand=True,\n",
    "                              fill=128),\n",
    "    transforms.ColorJitter(0.5,0.5,0.5,0.5),\n",
    "    transforms.Grayscale(3)\n",
    "]\n",
    "\n",
    "# Training pipeline: horizontal flip -> one random augmentation -> crop back to\n",
    "# 32x32 (RandomRotation(expand=True) may enlarge the image) -> vertical flip ->\n",
    "# HWC->CHW -> scale to [0, 1] -> standardize with the dataset statistics.\n",
    "train_tx = transforms.Compose([\n",
    "           transforms.RandomHorizontalFlip(),\n",
    "           AutoTransforms(transforms_list),\n",
    "           transforms.RandomCrop(32),\n",
    "           transforms.RandomVerticalFlip(),\n",
    "           transforms.Transpose(),\n",
    "           transforms.Normalize(0.0, 255.0),\n",
    "           transforms.Normalize(mean, std)\n",
    "])\n",
    "\n",
    "# Evaluation pipeline: no augmentation, only layout conversion + normalization.\n",
    "val_tx = transforms.Compose([\n",
    "         transforms.Transpose(),\n",
    "         transforms.Normalize(0.0, 255.0),\n",
    "         transforms.Normalize(mean, std)\n",
    "])\n",
    "\n",
    "cifar100_train = paddle.vision.datasets.Cifar100(mode='train', transform=train_tx, download=True)\n",
    "cifar100_test = paddle.vision.datasets.Cifar100(mode='test', transform=val_tx, download=True)\n",
    "\n",
    "print('训练集数量:', len(cifar100_train), '训练集图像尺寸', cifar100_train[0][0].shape)\n",
    "print('测试集数量:', len(cifar100_test), '测试集图像尺寸', cifar100_test[0][0].shape)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def anti_normalize(image):\n",
    "    # Undo Normalize(mean, std): x * std + mean, then CHW -> HWC for plotting.\n",
    "    image = paddle.to_tensor(image)\n",
    "    t_mean = paddle.to_tensor(mean).reshape([3, 1, 1]).expand([3, 32, 32])\n",
    "    t_std = paddle.to_tensor(std).reshape([3, 1, 1]).expand([3, 32, 32])\n",
    "    return (image * t_std + t_mean).transpose([1, 2, 0])\n",
    "\n",
    "def plot_num_images(num, data):\n",
    "    # Plots `num` randomly chosen samples from `data` in a near-square grid.\n",
    "    # NOTE(review): indices are drawn with replacement, so duplicates can appear.\n",
    "    if num < 1:\n",
    "        print('INFO:The number of input pictures must be greater than zero!')\n",
    "    else:\n",
    "        choose_list = []\n",
    "        for i in range(num):\n",
    "            choose_n = np.random.randint(len(data))\n",
    "            choose_list.append(choose_n)\n",
    "        fig = plt.gcf()\n",
    "        fig.set_size_inches(10, 10)\n",
    "        for i in range(num):\n",
    "            # Grid layout: ceil(num / floor(sqrt(num))) rows x floor(sqrt(num)) cols.\n",
    "            ax_img = plt.subplot(math.ceil(num / int(math.sqrt(num))), int(math.sqrt(num)), i + 1)\n",
    "            single_data = data[choose_list[i]]\n",
    "            plt_img = anti_normalize(single_data[0])\n",
    "            ax_img.imshow(plt_img, cmap='binary')\n",
    "            ax_img.set_title('label:' + str(single_data[1]),\n",
    "                             fontsize=10)\n",
    "            ax_img.axis('off')\n",
    "        plt.show()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Preview 25 augmented training samples and 25 normalized test samples.\n",
    "plot_num_images(25, cifar100_train)\n",
    "plot_num_images(25, cifar100_test)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Patches(paddle.nn.Layer):\n",
    "    \"\"\"Splits a batch of images into flattened, non-overlapping square patches.\"\"\"\n",
    "\n",
    "    def __init__(self, patch_size):\n",
    "        super(Patches, self).__init__()\n",
    "        self.patch_size = patch_size\n",
    "\n",
    "    def forward(self, images):\n",
    "        # Kernel size == stride -> non-overlapping tiling of the image.\n",
    "        unfolded = F.unfold(images, self.patch_size, self.patch_size)\n",
    "        # [N, C*p*p, num_patches] -> [N, num_patches, C*p*p]\n",
    "        return paddle.transpose(unfolded, perm=[0, 2, 1])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "image_size = 32\n",
    "patch_size = 8\n",
    "\n",
    "# Pick one random test image and show it next to its 8x8-pixel patches.\n",
    "image = anti_normalize(paddle.to_tensor(cifar100_test[np.random.choice(len(cifar100_test))][0]))\n",
    "fig=plt.figure(figsize=(8, 4))\n",
    "grid = plt.GridSpec(4, 8, wspace=0.5, figure=fig)\n",
    "plt.subplot(grid[:4, :4])\n",
    "plt.imshow(image)\n",
    "plt.axis(\"off\")\n",
    "\n",
    "# Patches expects NCHW input: HWC -> CHW, then add a batch axis.\n",
    "patches = Patches(patch_size)(image.transpose([2, 0, 1]).unsqueeze(0))\n",
    "\n",
    "print(f\"Image size: {image_size} X {image_size}\")\n",
    "print(f\"Patch size: {patch_size} X {patch_size}\")\n",
    "print(f\"Patches per image: {patches.shape[1]}\")\n",
    "print(f\"Elements per patch: {patches.shape[-1]}\")\n",
    "\n",
    "# The 16 patches fill the right 4x4 half of the 4x8 grid.\n",
    "for i, patch in enumerate(patches[0]):\n",
    "    plt.subplot(grid[i // 4, i % 4 + 4])\n",
    "    patch_img = patch.reshape([3, patch_size, patch_size]).transpose([1,2,0])\n",
    "    plt.imshow(patch_img)\n",
    "    plt.axis(\"off\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Mlp(nn.Layer):\n",
    "    \"\"\"Transformer feed-forward block: Linear -> GELU -> Dropout -> Linear -> Dropout.\"\"\"\n",
    "\n",
    "    def __init__(self, feats, mlp_hidden, dropout=0.1):\n",
    "        super().__init__()\n",
    "        self.fc1 = nn.Linear(feats, mlp_hidden)\n",
    "        self.fc2 = nn.Linear(mlp_hidden, feats)\n",
    "        self.act = nn.GELU()\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "\n",
    "    def forward(self, x):\n",
    "        # The same dropout layer is reused after both projections.\n",
    "        hidden = self.dropout(self.act(self.fc1(x)))\n",
    "        return self.dropout(self.fc2(hidden))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class MultiHeadSelfAttention(nn.Layer):\n",
    "    # Multi-head self-attention with a fused qkv projection.\n",
    "    # NOTE(review): attention logits are scaled by sqrt(feats), not the more\n",
    "    # common sqrt(feats / head) — confirm this is intentional.\n",
    "    def __init__(self, feats, head=8, dropout=0., attn_dropout=0.0):\n",
    "        super(MultiHeadSelfAttention, self).__init__()\n",
    "        self.head = head\n",
    "        self.feats = feats\n",
    "        self.sqrt_d = self.feats ** 0.5\n",
    "        self.qkv = nn.Linear(feats,\n",
    "                             feats * 3)\n",
    "        self.out = nn.Linear(feats, feats)\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        self.attn_dropout = nn.Dropout(attn_dropout)\n",
    "\n",
    "    def transpose_multi_head(self, x):\n",
    "        # [b, n, feats] -> [b, head, n, feats // head]\n",
    "        new_shape = x.shape[:-1] + [self.head, self.feats//self.head]\n",
    "        x = x.reshape(new_shape)\n",
    "        x = x.transpose([0, 2, 1, 3])\n",
    "        return x\n",
    "\n",
    "    def forward(self, x):\n",
    "        b, n, f = x.shape\n",
    "        # One projection produces q, k, v; split along the last axis.\n",
    "        qkv = self.qkv(x).chunk(3, -1)\n",
    "        q, k, v = map(self.transpose_multi_head, qkv)\n",
    "        attn = F.softmax(paddle.einsum(\"bhif, bhjf->bhij\", q, k) / self.sqrt_d, axis=-1)\n",
    "        attn = self.attn_dropout(attn)\n",
    "        # The output einsum also moves heads next to features: [b, n, head, f/head].\n",
    "        attn = paddle.einsum(\"bhij, bhjf->bihf\", attn, v)\n",
    "        out = self.dropout(self.out(attn.flatten(2)))\n",
    "        return out\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class TransformerEncoder(nn.Layer):\n",
    "    \"\"\"Pre-norm transformer block: LN -> attention -> residual, then LN -> MLP -> residual.\"\"\"\n",
    "\n",
    "    def __init__(self, feats, mlp_hidden, head=8, dropout=0., attn_dropout=0.):\n",
    "        super(TransformerEncoder, self).__init__()\n",
    "        self.layer1 = nn.LayerNorm(feats)\n",
    "        self.msa = MultiHeadSelfAttention(feats, head=head, dropout=dropout, attn_dropout=attn_dropout)\n",
    "        self.layer2 = nn.LayerNorm(feats)\n",
    "        self.mlp = Mlp(feats, mlp_hidden)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = x + self.msa(self.layer1(x))\n",
    "        x = x + self.mlp(self.layer2(x))\n",
    "        return x\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class ViT(nn.Layer):\n",
    "    # Vision Transformer. `patch` is the number of patches per image SIDE, so\n",
    "    # each patch is (img_size // patch) pixels square and there are patch**2\n",
    "    # patches per image.\n",
    "    # NOTE(review): `in_c` is accepted but unused — the channel count is\n",
    "    # hard-coded to 3 in the computation of `f` below.\n",
    "    def __init__(self, in_c=3, num_classes=10, img_size=32, patch=8, dropout=0., attn_dropout=0.0, num_layers=7, hidden=384, mlp_hidden=384*4, head=8, is_cls_token=True):\n",
    "        super(ViT, self).__init__()\n",
    "        self.patch = patch\n",
    "        self.is_cls_token = is_cls_token\n",
    "        # Side length of one patch in pixels.\n",
    "        self.patch_size = img_size // self.patch\n",
    "        self.patches = Patches(self.patch_size)\n",
    "        # Flattened patch length: patch_size * patch_size * 3 channels.\n",
    "        f = (img_size // self.patch) ** 2 * 3\n",
    "        num_tokens = (self.patch ** 2) + 1 if self.is_cls_token else (self.patch ** 2)\n",
    "\n",
    "        self.emb = nn.Linear(f, hidden)\n",
    "        # Learnable [CLS] token, prepended to the patch sequence when enabled.\n",
    "        self.cls_token  = paddle.create_parameter(\n",
    "            shape = [1, 1, hidden],\n",
    "            dtype = 'float32',\n",
    "            default_initializer=nn.initializer.Assign(paddle.randn([1, 1, hidden]))\n",
    "        ) if is_cls_token else None\n",
    "\n",
    "        # Learnable positional embedding, one vector per token (incl. [CLS]).\n",
    "        self.pos_embedding  = paddle.create_parameter(\n",
    "            shape = [1,num_tokens, hidden],\n",
    "            dtype = 'float32',\n",
    "            default_initializer=nn.initializer.Assign(paddle.randn([1,num_tokens, hidden]))\n",
    "        )\n",
    "\n",
    "        encoder_list = [TransformerEncoder(hidden, mlp_hidden=mlp_hidden, dropout=dropout, attn_dropout=attn_dropout, head=head) for _ in range(num_layers)]\n",
    "        self.encoder = nn.Sequential(*encoder_list)\n",
    "        self.fc = nn.Sequential(\n",
    "            nn.LayerNorm(hidden),\n",
    "            nn.Linear(hidden, num_classes) # for cls_token\n",
    "        )\n",
    "\n",
    "    def forward(self, x):\n",
    "        out = self.patches(x)\n",
    "        out = self.emb(out)\n",
    "        if self.is_cls_token:\n",
    "            # Broadcast the [CLS] token across the batch and prepend it.\n",
    "            out = paddle.concat([self.cls_token.tile([out.shape[0],1,1]), out], axis=1)\n",
    "        out = out + self.pos_embedding\n",
    "        out = self.encoder(out)\n",
    "        if self.is_cls_token:\n",
    "            # Classify from the [CLS] token; otherwise mean-pool all tokens.\n",
    "            out = out[:,0]\n",
    "        else:\n",
    "            out = out.mean(1)\n",
    "        out = self.fc(out)\n",
    "        return out\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class LabelSmoothingCrossEntropyLoss(nn.Layer):\n",
    "    # Cross-entropy with label smoothing: the true class receives probability\n",
    "    # 1 - smoothing; the remaining mass is spread evenly over the other classes.\n",
    "    def __init__(self, classes, smoothing=0.0, dim=-1):\n",
    "        super(LabelSmoothingCrossEntropyLoss, self).__init__()\n",
    "        self.confidence = 1.0 - smoothing\n",
    "        self.smoothing = smoothing\n",
    "        self.cls = classes\n",
    "        self.dim = dim\n",
    "\n",
    "    def forward(self, pred, target):\n",
    "        # pred: [batch, classes] logits; target: [batch] integer class labels.\n",
    "        pred = F.log_softmax(pred, axis=self.dim)\n",
    "        with paddle.no_grad():\n",
    "            # Build the smoothed target distribution in place, without gradients.\n",
    "            true_dist = paddle.ones_like(pred)\n",
    "            true_dist.fill_(self.smoothing / (self.cls - 1))\n",
    "            true_dist.put_along_axis_(target.unsqueeze(1), self.confidence, 1)\n",
    "        return paddle.mean(paddle.sum(-true_dist * pred, axis=self.dim))\n",
    "\n",
    "def get_scheduler(epochs, warmup_epochs, learning_rate):\n",
    "    \"\"\"Cosine-annealing LR schedule preceded by a linear warmup from 1e-5.\"\"\"\n",
    "    cosine = lrScheduler.CosineAnnealingDecay(learning_rate=learning_rate, T_max=epochs, eta_min=1e-5, verbose=False)\n",
    "    return lrScheduler.LinearWarmup(cosine, warmup_epochs, 1e-5, learning_rate, last_epoch=-1, verbose=False)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity-check the schedule: plot the LR over 100 epochs (5 warmup epochs).\n",
    "schedulerTest = get_scheduler(epochs=100, warmup_epochs=5, learning_rate=1e-3)\n",
    "lr = []\n",
    "for epoch in range(100):\n",
    "    lr.append(schedulerTest.get_lr())\n",
    "    schedulerTest.step()\n",
    "plt.plot(lr)\n",
    "plt.show()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Small ViT for CIFAR-100: 2 encoder layers, width 96, 3 heads, 4x4-pixel patches.\n",
    "Model = ViT(in_c=3, num_classes=100, img_size=32, patch=8, dropout=0.5, attn_dropout=0.1, num_layers=2, hidden=96, head=3, mlp_hidden=96, is_cls_token=True)\n",
    "paddle.summary(Model, (1, 3, 32, 32))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Training hyperparameters and shared training state.\n",
    "EPOCHS = 10\n",
    "BATCH_SIZE = 4\n",
    "NUM_CLASSES = 100\n",
    "WARMUP_EPOCHS = 2\n",
    "LR = 1e-3\n",
    "allloss=[]  # per-step training loss history\n",
    "allacc=[]  # per-step running training accuracy\n",
    "\n",
    "scheduler = get_scheduler(epochs=EPOCHS, warmup_epochs=WARMUP_EPOCHS, learning_rate=LR)\n",
    "optim = paddle.optimizer.Adam(learning_rate=scheduler, parameters=Model.parameters(), weight_decay=5e-5)\n",
    "criterion = LabelSmoothingCrossEntropyLoss(NUM_CLASSES, smoothing=0.1)\n",
    "\n",
    "train_loader = DataLoader(cifar100_train, batch_size=BATCH_SIZE, shuffle=True, num_workers=0, drop_last=False)\n",
    "test_loader = DataLoader(cifar100_test, batch_size=BATCH_SIZE * 16, shuffle=False, num_workers=0, drop_last=False)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train_epoch(model, epoch, interval=20):\n",
    "    # Runs one training epoch over train_loader, updating `model` with the\n",
    "    # module-level optim/criterion and appending per-step stats to allacc/allloss.\n",
    "    # The tqdm bar is only rendered every `interval` epochs.\n",
    "    model.train()  # validation() leaves the model in eval mode; re-enable dropout\n",
    "    acc_num = 0\n",
    "    total_samples = 0\n",
    "    nb = len(train_loader)\n",
    "    pbar = tqdm(enumerate(train_loader), total=nb, colour='red', disable=((epoch + 1) % interval != 0))\n",
    "    pbar.set_description(f'EPOCH: {epoch:3d}')\n",
    "    print('epoch',epoch)\n",
    "    for _, data in pbar:\n",
    "        x_data = data[0]\n",
    "        y_data = data[1]\n",
    "        predicts = model(x_data)\n",
    "        loss = criterion(predicts, y_data)\n",
    "        loss_item = loss.item()\n",
    "        acc_num += paddle.sum(predicts.argmax(1) == y_data).item()\n",
    "        total_samples += y_data.shape[0]\n",
    "        total_acc = acc_num / total_samples\n",
    "        allacc.append(total_acc)\n",
    "        allloss.append(loss_item)\n",
    "        current_lr = optim.get_lr()\n",
    "        loss.backward()\n",
    "        pbar.set_postfix(train_loss=f'{loss_item:5f}', train_acc=f'{total_acc:5f}', train_lr=f'{current_lr:5f}')\n",
    "        optim.step()\n",
    "        optim.clear_grad()\n",
    "    print('acc', allacc[-1],'loss',allloss[-1])\n",
    "    scheduler.step()  # epoch-level LR update (warmup/cosine count epochs)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "testacc=[]  # running test accuracy after each evaluation batch\n",
    "@paddle.no_grad()\n",
    "def validation(model, epoch, interval=20):\n",
    "    # Evaluates `model` on test_loader and appends running accuracy to testacc.\n",
    "    # NOTE(review): switches the model to eval mode and never restores train\n",
    "    # mode — callers that keep training must call model.train() afterwards.\n",
    "    model.eval()\n",
    "    acc_num = 0\n",
    "    total_samples = 0\n",
    "    nb = len(test_loader)\n",
    "    pbar = enumerate(test_loader)\n",
    "    pbar = tqdm(pbar, total=nb, colour='green', disable=((epoch + 1) % interval != 0))\n",
    "    pbar.set_description(f'EVAL')\n",
    "    for _, (_, data) in enumerate(pbar):\n",
    "        x_data = data[0]\n",
    "        y_data = data[1]\n",
    "        predicts = model(x_data)\n",
    "        acc_num += paddle.sum(predicts.argmax(1) == y_data).item()\n",
    "        total_samples += y_data.shape[0]\n",
    "        batch_acc = paddle.metric.accuracy(predicts, y_data.unsqueeze(1)).item()\n",
    "        total_acc = acc_num / total_samples\n",
    "        pbar.set_postfix(eval_batch_acc=f'{batch_acc:4f}', total_acc=f'{total_acc:4f}')\n",
    "        testacc.append(total_acc)\n",
    "    print('acc', testacc[-1])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Full training run: one train + eval pass per epoch, checkpointing each epoch.\n",
    "start = time.time()\n",
    "for epoch in range(EPOCHS):\n",
    "    train_epoch(Model, epoch)\n",
    "    validation(Model, epoch)\n",
    "    # Per-epoch checkpoint, plus a final copy once training completes.\n",
    "    paddle.save(Model.state_dict(), str(epoch + 1) + '.pdparams')\n",
    "paddle.save(Model.state_dict(), 'finished.pdparams')\n",
    "end = time.time()\n",
    "print('Training Cost ', (end-start) / 60, 'minutes')\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "name": "python",
   "version": "3.11.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
