{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from torchvision.transforms import v2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[ 1.5125,  1.5125,  1.5125,  ...,  0.8104,  0.8104,  0.8104],\n",
       "         [ 1.5125,  1.5125,  1.5125,  ...,  0.8104,  0.8104,  0.8104],\n",
       "         [ 1.5125,  1.5125,  1.5125,  ...,  0.8104,  0.8104,  0.8104],\n",
       "         ...,\n",
       "         [-0.0116, -0.0116, -0.0116,  ..., -1.0219, -1.0219, -1.0219],\n",
       "         [-0.0116, -0.0116, -0.0116,  ..., -1.0219, -1.0219, -1.0219],\n",
       "         [-0.0116, -0.0116, -0.0116,  ..., -1.0219, -1.0219, -1.0219]],\n",
       "\n",
       "        [[ 2.4286,  2.4286,  2.4286,  ...,  1.1331,  1.1331,  1.1331],\n",
       "         [ 2.4286,  2.4286,  2.4286,  ...,  1.1331,  1.1331,  1.1331],\n",
       "         [ 2.4286,  2.4286,  2.4286,  ...,  1.1331,  1.1331,  1.1331],\n",
       "         ...,\n",
       "         [-0.3901, -0.3901, -0.3901,  ...,  1.7808,  1.7808,  1.7808],\n",
       "         [-0.3901, -0.3901, -0.3901,  ...,  1.7808,  1.7808,  1.7808],\n",
       "         [-0.3901, -0.3901, -0.3901,  ...,  1.7808,  1.7808,  1.7808]],\n",
       "\n",
       "        [[-1.5081, -1.5081, -1.5081,  ..., -0.7238, -0.7238, -0.7238],\n",
       "         [-1.5081, -1.5081, -1.5081,  ..., -0.7238, -0.7238, -0.7238],\n",
       "         [-1.5081, -1.5081, -1.5081,  ..., -0.7238, -0.7238, -0.7238],\n",
       "         ...,\n",
       "         [-0.6541, -0.6541, -0.6541,  ..., -1.1770, -1.1770, -1.1770],\n",
       "         [-0.6541, -0.6541, -0.6541,  ..., -1.1770, -1.1770, -1.1770],\n",
       "         [-0.6541, -0.6541, -0.6541,  ..., -1.1770, -1.1770, -1.1770]]])"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Synthetic 3xHxW uint8 image (CHW layout, values in [0, 255]).\n",
    "H, W = 32, 32\n",
    "img = torch.randint(0, 256, size=(3, H, W), dtype=torch.uint8)\n",
    "\n",
    "# Classification-style augmentation pipeline using the transforms v2 API.\n",
    "transforms = v2.Compose([\n",
    "    v2.RandomResizedCrop(size=(224, 224), antialias=True),\n",
    "    v2.RandomHorizontalFlip(p=0.5),\n",
    "    v2.ToDtype(torch.float32, scale=True),  # uint8 [0, 255] -> float32 [0.0, 1.0]\n",
    "    v2.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])  # ImageNet stats\n",
    "])\n",
    "img = transforms(img)  # NOTE: rebinds `img` to the transformed float tensor\n",
    "img"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "from torchvision import tv_tensors\n",
    "\n",
    "# Fresh uint8 image plus 3 random XYXY boxes that fit inside the HxW canvas.\n",
    "img = torch.randint(0, 256, size=(3, H, W), dtype=torch.uint8)\n",
    "boxes = torch.randint(0, H//2, size=(3, 4))\n",
    "boxes[:, 2:] += boxes[:, :2]  # make (x2, y2) = (x1, y1) + offset, so x2 >= x1, y2 >= y1\n",
    "boxes = tv_tensors.BoundingBoxes(boxes, format=\"XYXY\", canvas_size=(H, W))\n",
    "\n",
    "# Two equivalent calling conventions, both applied to the ORIGINAL inputs.\n",
    "# (Previously the dict call reused the already-transformed tensors, running\n",
    "# them through crop/scale/normalize a second time.)\n",
    "output_dict = transforms({\"image\": img, \"boxes\": boxes})\n",
    "img, boxes = transforms(img, boxes)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[[0.3333, 0.6667, 1.0000],\n",
      "         [0.3333, 0.6667, 1.0000]],\n",
      "\n",
      "        [[0.5000, 1.0000, 1.5000],\n",
      "         [0.5000, 1.0000, 1.5000]],\n",
      "\n",
      "        [[1.0000, 2.0000, 3.0000],\n",
      "         [1.0000, 2.0000, 3.0000]]])\n",
      "tensor([[[1., 1., 1.],\n",
      "         [1., 1., 1.]],\n",
      "\n",
      "        [[2., 2., 2.],\n",
      "         [2., 2., 2.]],\n",
      "\n",
      "        [[3., 3., 3.],\n",
      "         [3., 3., 3.]]])\n"
     ]
    }
   ],
   "source": [
    "# Broadcasting demo: two ways to build a (3, 2, 3) tensor from 1-D inputs.\n",
    "a = torch.tensor([1, 2, 3.])\n",
    "b = torch.tensor([3, 2, 1.])\n",
    "# repeat tiles the whole of `a`; dividing by b[:, None, None] broadcasts\n",
    "# each element of `b` across one (2, 3) slice.\n",
    "print(a.repeat((3, 2)).reshape((3, 2, 3)) / b[:, None, None])\n",
    "# repeat_interleave repeats each element of `a` 6 times consecutively, so\n",
    "# after the reshape every (2, 3) slice holds a single value of `a`.\n",
    "print(a[:, None, None].repeat_interleave(6).reshape((3, 2, 3)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "2.4931271076202393"
      ]
     },
     "execution_count": 49,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Draw one float uniformly from [0, len(a)).\n",
    "# NOTE(review): depends on `a` from an earlier cell (hidden kernel state) —\n",
    "# this cell fails on a fresh Restart & Run All if that cell is skipped.\n",
    "torch.empty(1).uniform_(0., float(len(a))).item()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[[[0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          ...,\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.]],\n",
      "\n",
      "         [[0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          ...,\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.]],\n",
      "\n",
      "         [[0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          ...,\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.]]],\n",
      "\n",
      "\n",
      "        [[[0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          ...,\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.]],\n",
      "\n",
      "         [[0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          ...,\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.]],\n",
      "\n",
      "         [[0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          ...,\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.],\n",
      "          [0., 0., 0.,  ..., 0., 0., 0.]]]])\n"
     ]
    }
   ],
   "source": [
    "# Simulate zero-padding two same-size images into one batched tensor\n",
    "# whose spatial dims are larger than the inputs (800x1088 -> 800x1120).\n",
    "images = torch.normal(0, 1, size=(3, 800, 1088))\n",
    "images = [images] * 2\n",
    "\n",
    "# new_full creates a (2, 3, 800, 1120) tensor of zeros with the same dtype\n",
    "# and device as images[0].\n",
    "bts = images[0].new_full([2, 3, 800, 1120], 0)\n",
    "print(bts)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "d2l",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
