{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "junior-madness",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Imports — deduplicated (os/time/glob/random/numpy/PIL appeared 2-3 times\n",
    "# each in the original) and grouped: stdlib -> third-party -> local.\n",
    "import glob\n",
    "import os\n",
    "import random\n",
    "import sys\n",
    "import threading\n",
    "import time\n",
    "\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "import seaborn\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "import torchvision.models as models\n",
    "import torchvision.transforms as transforms\n",
    "from PIL import Image\n",
    "from skimage import io, transform, color\n",
    "from sklearn.utils import shuffle\n",
    "from torch.utils.data import DataLoader, Dataset\n",
    "\n",
    "from ev_toolkit import plot_tool\n",
    "from trash_dataloader import TrashDataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "id": "filled-tobacco",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{\n",
      "    \"class\": \"Harmful Waste_Battery board\"\n",
      "} {\n",
      "    \"class\": \"Harmful Waste_Battery board\"\n",
      "}\n"
     ]
    }
   ],
   "source": [
    "import cv2\n",
    "import json\n",
    "import numpy as np\n",
    "import torch\n",
    "from skimage import io,transform,color\n",
    "from torchvision import transforms, models\n",
    "# Inference setup for our own trained model.\n",
    "\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "\n",
    "# Training-time hyper-parameters, kept for reference; only BATCH_SIZE is\n",
    "# actually used below (by the DataLoader).\n",
    "MAX_EPOCH = 6\n",
    "BATCH_SIZE = 1\n",
    "LR = 0.002\n",
    "\n",
    "start_epoch = -1\n",
    "lr_decay_step = 7\n",
    "\n",
    "# NOTE(review): relative path climbs far out of the repo — assumes a fixed\n",
    "# deployment layout; confirm before running elsewhere.\n",
    "train_dir = '../../../../home/data/'\n",
    "num = os.listdir(train_dir)\n",
    "train_dir = os.path.join(train_dir,num[0])\n",
    "# For local coding tests only: class names come from the directory names.\n",
    "type_lst = os.listdir(train_dir)\n",
    "classes = type_lst\n",
    "# ImageNet normalisation statistics.\n",
    "norm_mean = [0.485, 0.456, 0.406]\n",
    "norm_std = [0.229, 0.224, 0.225]\n",
    "\n",
    "valid_transform = transforms.Compose([\n",
    "    transforms.Resize((112,112)),\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize(norm_mean, norm_std),\n",
    "])\n",
    "# Build the dataset instance.\n",
    "valid_data = TrashDataset(data_dir = train_dir,transform=valid_transform)\n",
    "# Build the DataLoader.\n",
    "valid_loader = DataLoader(dataset=valid_data, batch_size=BATCH_SIZE)\n",
    "\n",
    "\n",
    "def init():\n",
    "    \"\"\"Load the trained model checkpoint for CPU inference.\n",
    "\n",
    "    Returns:\n",
    "        torch.nn.Module: the deserialized model, moved to CPU.\n",
    "    \"\"\"\n",
    "    # Checkpoint selected for evaluation.\n",
    "    pth = '../models/202158model_resnet_20epoch/models.pkl'\n",
    "    # map_location='cpu' lets a CUDA-saved checkpoint load on a CPU-only\n",
    "    # machine; without it torch.load would fail before the .to('cpu') below.\n",
    "    # SECURITY: torch.load unpickles arbitrary objects — only load trusted files.\n",
    "    model = torch.load(pth, map_location='cpu')\n",
    "    model.to('cpu')\n",
    "\n",
    "    return model\n",
    "\n",
    "# Label mapping produced at training time (name -> index); inverted below so\n",
    "# a predicted index can be translated back to a class name.\n",
    "import ast\n",
    "\n",
    "with open('./class.txt', 'r') as f:\n",
    "    # ast.literal_eval is a safe replacement for eval() on this dict literal.\n",
    "    class_dict = ast.literal_eval(f.read())\n",
    "class_dict = {value: key for key, value in class_dict.items()}\n",
    "\n",
    "\n",
    "from PIL import Image\n",
    "from torchvision import transforms as T\n",
    "# (removed a dangling 'import torch as ' line here — it was a SyntaxError;\n",
    "# torch is already imported at the top of this cell)\n",
    "\n",
    "def process_image(net, input_image, args=None):\n",
    "    \"\"\"Run single-image classification inference.\n",
    "\n",
    "    Args:\n",
    "        net: trained classification model.\n",
    "        input_image: BGR image array as returned by cv2.imread.\n",
    "        args: unused; kept for interface compatibility.\n",
    "\n",
    "    Returns:\n",
    "        tuple: (JSON string with predicted class name, raw logits, predicted index).\n",
    "    \"\"\"\n",
    "    # cv2 loads BGR; PIL/torchvision expect RGB.\n",
    "    img = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)\n",
    "    img = Image.fromarray(img)\n",
    "    norm_mean = [0.485, 0.456, 0.406]\n",
    "    norm_std = [0.229, 0.224, 0.225]\n",
    "    # NOTE(review): Resize(112) scales the shorter side only, while the\n",
    "    # valid_transform above uses Resize((112,112)) — confirm the mismatch.\n",
    "    # Renamed from 'transforms' to avoid shadowing the torchvision module.\n",
    "    preprocess = T.Compose([T.Resize(112), T.ToTensor(), T.Normalize(norm_mean, norm_std)])\n",
    "    img = preprocess(img)\n",
    "    # NOTE(review): this swaps the H and W axes of the (C, H, W) tensor —\n",
    "    # looks suspicious, but preserved because the model was trained this way.\n",
    "    # (Replaces the original tensor->numpy->tensor round trip with an\n",
    "    # equivalent permute + unsqueeze to add the batch dimension.)\n",
    "    img = img.permute(0, 2, 1).unsqueeze(0)\n",
    "    net.eval()\n",
    "    with torch.no_grad():\n",
    "        out = net(img)\n",
    "        _, pred = torch.max(out.data, 1)\n",
    "        data = json.dumps({'class': class_dict[pred[0].item()]}, indent=4)\n",
    "    return data, out, pred\n",
    "\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    # Smoke test: run one known image through the model and print its\n",
    "    # prediction next to the ground-truth label index (139).\n",
    "    net = init()\n",
    "    labels = 139\n",
    "    path_img = '../../../../home/data/19/Harmful Waste_Battery board/bc2317b60aa246269f30d4b298344cb2.jpg'\n",
    "#     img = Image.open(path_img).convert('RGB') \n",
    "    img = cv2.imread(path_img)\n",
    "    # cv2.imread already returns an ndarray; asarray is a no-copy pass-through.\n",
    "    inputs = np.asarray(img)\n",
    "#     inputs = torch.from_numpy(inputs)\n",
    "\n",
    "    dic, outputs, predicted = process_image(net, inputs)\n",
    "    print(dic, json.dumps({'class': class_dict[labels]},indent=4))\n",
    "    outputs = outputs.to('cpu')\n",
    "    predicted = predicted.to('cpu')\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 131,
   "id": "smoking-wales",
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): in the cell above `labels` was last bound to the int 139,\n",
    "# which is not indexable — `labels[0].item()` only works when `labels` is a\n",
    "# tensor batch from a DataLoader. Confirm which kernel state this expects.\n",
    "data = json.dumps({'class': class_dict[labels[0].item()]},indent=4)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "grave-martin",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([110]) 0\n"
     ]
    }
   ],
   "source": [
    "# Show the predicted index next to the ground-truth label for a quick check.\n",
    "print(predicted,labels)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "hollow-wireless",
   "metadata": {},
   "outputs": [],
   "source": [
    "type_lst = os.listdir(train_dir)\n",
    "classes = type_lst\n",
    "def load_data(type_lst, file_path):\n",
    "    \"\"\"Map each class-folder name to a sequential integer label.\n",
    "\n",
    "    Args:\n",
    "        type_lst: list of class-folder names, in labelling order.\n",
    "        file_path: dataset root directory (unused; kept for interface compatibility).\n",
    "\n",
    "    Returns:\n",
    "        dict: {class_name: integer_label} with labels 0..len(type_lst)-1.\n",
    "    \"\"\"\n",
    "    # The original's unused images/labels lists and temp_path were removed.\n",
    "    return {name: idx for idx, name in enumerate(type_lst)}\n",
    "trash_name = load_data(type_lst,train_dir)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "applicable-sperm",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n",
      "1\n",
      "4\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import random\n",
    "import ast\n",
    "import glob\n",
    "import numpy as np\n",
    "from PIL import Image\n",
    "from torch.utils.data import DataLoader, Dataset\n",
    "# NOTE(review): star import pollutes the namespace — only get_transforms is\n",
    "# used below; consider importing it explicitly.\n",
    "from data_Augmentation import *\n",
    "# NOTE(review): this TrashDataset import is shadowed by the class defined\n",
    "# later in this cell — keep one of the two.\n",
    "from oversample_Dataloader import TrashDataset\n",
    "random.seed(1)\n",
    "# Determines whether we are testing or training.\n",
    "\n",
    "# Class-name -> label mapping saved at training time.\n",
    "# ast.literal_eval is a safe replacement for eval() on this dict literal.\n",
    "with open('/project/train/src_repo/class.txt', 'r') as f:\n",
    "    trash_name = ast.literal_eval(f.read())\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "class TrashDataset(Dataset):\n",
    "    \"\"\"Image dataset that oversamples smaller classes toward the largest class.\n",
    "\n",
    "    NOTE(review): relies on the module-level globals `classes` and `trash_name`\n",
    "    being defined before instantiation, and shadows the TrashDataset imported\n",
    "    from oversample_Dataloader earlier in this cell.\n",
    "    \"\"\"\n",
    "    def __init__(self, data_dir, transform):\n",
    "        # data_info holds every (image path, label) pair; DataLoader reads\n",
    "        # samples from it by index.\n",
    "        self.data_info = self.get_img_info(data_dir)\n",
    "        self.transform = transform\n",
    "        self.labelname = trash_name\n",
    "        \n",
    "\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        path_img, label = self.data_info[index]\n",
    "        img = Image.open(path_img).convert('RGB')     # 0~255\n",
    "\n",
    "        if self.transform is not None:\n",
    "            img = self.transform(img)   # transform applied here (to tensor, etc.)\n",
    "\n",
    "        return img, label\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.data_info)\n",
    "\n",
    "    @staticmethod\n",
    "    def get_img_info(data_dir):\n",
    "        # Count .jpg files per class to find the largest class size.\n",
    "        num = []\n",
    "        for i in classes:\n",
    "            im_pth = '{}/{}/*.jpg'.format(data_dir, i)\n",
    "            path_file_number=glob.glob(im_pth)# files under this class directory\n",
    "            num.append(len(path_file_number))\n",
    "        num = np.array(num)\n",
    "        max_num = np.max(num)\n",
    "        \n",
    "        data_info = list()\n",
    "        for root, dirs, _ in os.walk(data_dir):\n",
    "            # Iterate over the class sub-directories.\n",
    "            for sub_dir in dirs:\n",
    "                img_names = os.listdir(os.path.join(root, sub_dir))\n",
    "                \n",
    "\n",
    "                img_names = list(filter(lambda x: x.endswith('.jpg'), img_names))\n",
    "                # Oversample: repeat each file floor(max_num/len) times.\n",
    "                # NOTE(review): integer truncation means classes are not brought\n",
    "                # exactly to max_num, and a ratio below 2 adds no copies at all —\n",
    "                # confirm this approximation is intended.\n",
    "                if len(img_names) < max_num:\n",
    "                    n = max_num / len(img_names)\n",
    "                    img_names = [val for val in img_names for i in range(int(n))]\n",
    "\n",
    "                # Iterate over the (possibly duplicated) images in this class.\n",
    "                for i in range(len(img_names)):\n",
    "                    img_name = img_names[i]\n",
    "                    path_img = os.path.join(root, sub_dir, img_name)\n",
    "                    label = trash_name[sub_dir]\n",
    "                    data_info.append((path_img, int(label)))\n",
    "        if len(data_info) == 0:\n",
    "            raise Exception('\\n data dir: {} is empty!'.format(data_dir))\n",
    "\n",
    "        return data_info\n",
    "    \n",
    "\n",
    "if __name__ == '__main__':\n",
    "    base_dir = \"../../../../home/data\"\n",
    "    num = os.listdir(base_dir)\n",
    "    temp = os.path.join(base_dir,num[0])\n",
    "    # For local coding tests only.\n",
    "    type_lst = os.listdir(temp)\n",
    "    classes = type_lst\n",
    "    # Step 1/5: data.\n",
    "    # Class-folder names under the dataset directory become type_lst/classes.\n",
    "    train_dir = './split_data/train'\n",
    "    # get_transforms comes from the data_Augmentation star import above.\n",
    "    transform = get_transforms(input_size=224, test_size=224, backbone=None)\n",
    "    train_data = TrashDataset(data_dir = train_dir,transform=transform['train'])\n",
    "    train_loader = DataLoader(dataset=train_data, batch_size=8, shuffle=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "ongoing-street",
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): this loop overwrites inputs/labels on every iteration, so\n",
    "# only the final batch survives. Fine for a quick inspection; if a single\n",
    "# batch is all that is needed, next(iter(train_loader)) avoids the full pass.\n",
    "for j, data in enumerate(train_loader):\n",
    "    inputs, labels = data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "streaming-manchester",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor([[[[ 1.8379,  1.7865,  1.7694,  ...,  1.8722,  1.8722,  1.8550],\n",
       "           [ 1.8722,  1.7694,  1.7009,  ...,  1.7865,  1.8379,  1.8208],\n",
       "           [ 1.8037,  1.6838,  1.6495,  ...,  1.6324,  1.8037,  1.8208],\n",
       "           ...,\n",
       "           [ 1.5125,  1.6324,  1.6838,  ...,  0.3823,  0.0912, -0.5596],\n",
       "           [ 1.6495,  1.7180,  1.7523,  ...,  0.4679, -0.3198, -0.7993],\n",
       "           [ 1.7523,  1.7352,  1.6838,  ...,  0.4166, -0.5596, -0.3198]],\n",
       " \n",
       "          [[ 1.4832,  1.4307,  1.4132,  ...,  1.5182,  1.5182,  1.5007],\n",
       "           [ 1.5357,  1.4132,  1.3606,  ...,  1.4482,  1.4832,  1.4832],\n",
       "           [ 1.4657,  1.3431,  1.3081,  ...,  1.3081,  1.4832,  1.5182],\n",
       "           ...,\n",
       "           [ 0.7654,  0.8880,  0.9405,  ..., -0.3550, -0.5301, -1.1779],\n",
       "           [ 0.8529,  0.9230,  0.9580,  ..., -0.2850, -0.9853, -1.4580],\n",
       "           [ 0.9055,  0.9055,  0.8354,  ..., -0.3550, -1.2654, -0.9853]],\n",
       " \n",
       "          [[ 0.4439,  0.3916,  0.3742,  ...,  0.4962,  0.4788,  0.4614],\n",
       "           [ 0.4788,  0.3742,  0.3219,  ...,  0.4439,  0.4614,  0.4439],\n",
       "           [ 0.4439,  0.3219,  0.2871,  ...,  0.3219,  0.4788,  0.4962],\n",
       "           ...,\n",
       "           [-1.6824, -1.5779, -1.5256,  ..., -0.8981, -0.9853, -1.2467],\n",
       "           [-1.6127, -1.5430, -1.5081,  ..., -0.7761, -1.3861, -1.5953],\n",
       "           [-1.5604, -1.5779, -1.6302,  ..., -0.7587, -1.5604, -1.1770]]],\n",
       " \n",
       " \n",
       "         [[[ 2.2318,  2.2147,  2.2147,  ...,  2.1119,  2.1975,  2.2318],\n",
       "           [ 2.1119,  2.0777,  1.9920,  ...,  2.0948,  2.1804,  2.1975],\n",
       "           [ 2.0605,  1.9235,  1.8722,  ...,  2.0263,  2.1462,  2.2147],\n",
       "           ...,\n",
       "           [ 1.8550,  1.9064,  1.9407,  ...,  2.0777,  2.0777,  2.0777],\n",
       "           [ 1.9407,  1.9749,  2.0092,  ...,  2.2147,  2.2147,  2.2147],\n",
       "           [ 2.1633,  2.1633,  2.1633,  ...,  2.2318,  2.2318,  2.2318]],\n",
       " \n",
       "          [[ 2.4111,  2.3936,  2.3936,  ...,  2.2885,  2.3761,  2.4111],\n",
       "           [ 2.2885,  2.2535,  2.1660,  ...,  2.2710,  2.3585,  2.3761],\n",
       "           [ 2.2360,  2.0959,  2.0434,  ...,  2.2010,  2.3235,  2.3936],\n",
       "           ...,\n",
       "           [ 2.0259,  2.0784,  2.1134,  ...,  2.2535,  2.2535,  2.2535],\n",
       "           [ 2.1134,  2.1485,  2.1835,  ...,  2.3936,  2.3936,  2.3936],\n",
       "           [ 2.3410,  2.3410,  2.3410,  ...,  2.4111,  2.4111,  2.4111]],\n",
       " \n",
       "          [[ 2.6226,  2.6051,  2.6051,  ...,  2.5006,  2.5877,  2.6226],\n",
       "           [ 2.5006,  2.4657,  2.3786,  ...,  2.4831,  2.5703,  2.5877],\n",
       "           [ 2.4483,  2.3088,  2.2566,  ...,  2.4134,  2.5354,  2.6051],\n",
       "           ...,\n",
       "           [ 2.2391,  2.2914,  2.3263,  ...,  2.4657,  2.4657,  2.4657],\n",
       "           [ 2.3263,  2.3611,  2.3960,  ...,  2.6051,  2.6051,  2.6051],\n",
       "           [ 2.5529,  2.5529,  2.5529,  ...,  2.6226,  2.6226,  2.6226]]],\n",
       " \n",
       " \n",
       "         [[[ 2.2489,  2.2489,  2.2318,  ...,  2.2318,  2.2318,  2.2489],\n",
       "           [ 2.2318,  2.1975,  2.1804,  ...,  2.1462,  2.2147,  2.2489],\n",
       "           [ 2.1804,  2.1633,  2.1290,  ...,  2.0948,  2.1804,  2.2318],\n",
       "           ...,\n",
       "           [ 2.1462,  2.1290,  2.1462,  ...,  2.2318,  2.2489,  2.2489],\n",
       "           [ 2.1290,  2.0948,  2.1462,  ...,  2.2489,  2.2489,  2.2489],\n",
       "           [ 2.2318,  2.1804,  2.2147,  ...,  2.2489,  2.2489,  2.2489]],\n",
       " \n",
       "          [[ 2.4286,  2.4286,  2.4111,  ...,  2.4111,  2.4111,  2.4286],\n",
       "           [ 2.4111,  2.3761,  2.3585,  ...,  2.3235,  2.3936,  2.4286],\n",
       "           [ 2.3585,  2.3410,  2.3060,  ...,  2.2710,  2.3585,  2.4111],\n",
       "           ...,\n",
       "           [ 2.3235,  2.3060,  2.3235,  ...,  2.4111,  2.4286,  2.4286],\n",
       "           [ 2.3060,  2.2710,  2.3235,  ...,  2.4286,  2.4286,  2.4286],\n",
       "           [ 2.4111,  2.3585,  2.3936,  ...,  2.4286,  2.4286,  2.4286]],\n",
       " \n",
       "          [[ 2.6400,  2.6400,  2.6226,  ...,  2.6226,  2.6226,  2.6400],\n",
       "           [ 2.6226,  2.5877,  2.5703,  ...,  2.5354,  2.6051,  2.6400],\n",
       "           [ 2.5703,  2.5529,  2.5180,  ...,  2.4831,  2.5703,  2.6226],\n",
       "           ...,\n",
       "           [ 2.5354,  2.5180,  2.5354,  ...,  2.6226,  2.6400,  2.6400],\n",
       "           [ 2.5180,  2.4831,  2.5354,  ...,  2.6400,  2.6400,  2.6400],\n",
       "           [ 2.6226,  2.5703,  2.6051,  ...,  2.6400,  2.6400,  2.6400]]],\n",
       " \n",
       " \n",
       "         [[[-0.4054, -0.3883, -0.3883,  ..., -0.9705, -0.9877, -0.9534],\n",
       "           [-0.3883, -0.3027, -0.3198,  ..., -0.8849, -0.9534, -0.9877],\n",
       "           [-0.3198, -0.3198, -0.3541,  ..., -0.8507, -0.9020, -1.0219],\n",
       "           ...,\n",
       "           [-0.7479, -0.6794, -0.6452,  ..., -0.4568, -0.4739, -0.4739],\n",
       "           [-0.6794, -0.6794, -0.5938,  ..., -0.4226, -0.4226, -0.4226],\n",
       "           [-0.6452, -0.6281, -0.6452,  ..., -0.3712, -0.3712, -0.3883]],\n",
       " \n",
       "          [[-0.3025, -0.2850, -0.2850,  ..., -0.8452, -0.8627, -0.8277],\n",
       "           [-0.2850, -0.1975, -0.2150,  ..., -0.7752, -0.8277, -0.8627],\n",
       "           [-0.2150, -0.2150, -0.2500,  ..., -0.7577, -0.7927, -0.8978],\n",
       "           ...,\n",
       "           [-0.6176, -0.5476, -0.5126,  ..., -0.3025, -0.3200, -0.3200],\n",
       "           [-0.5476, -0.5476, -0.4601,  ..., -0.2675, -0.2675, -0.2675],\n",
       "           [-0.5126, -0.4951, -0.5126,  ..., -0.2150, -0.2150, -0.2500]],\n",
       " \n",
       "          [[-0.4275, -0.4101, -0.4101,  ..., -0.8633, -0.8807, -0.8458],\n",
       "           [-0.4101, -0.3230, -0.3404,  ..., -0.7761, -0.8458, -0.8807],\n",
       "           [-0.3055, -0.3230, -0.3927,  ..., -0.7587, -0.8110, -0.9156],\n",
       "           ...,\n",
       "           [-0.4798, -0.4101, -0.3753,  ..., -0.1661, -0.1835, -0.1835],\n",
       "           [-0.4101, -0.4101, -0.3230,  ..., -0.1312, -0.1312, -0.1312],\n",
       "           [-0.3753, -0.3578, -0.3753,  ..., -0.0790, -0.0790, -0.1138]]]],\n",
       "        device='cuda:0'),\n",
       " tensor([  5,  67, 136, 118], device='cuda:0'))"
      ]
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "# NOTE(review): Tensor.to() is NOT in-place — the moved tensors are only the\n",
    "# displayed value of this expression; inputs/labels themselves keep their\n",
    "# original device. Assign the results if later cells need them on `device`.\n",
    "inputs.to(device), labels.to(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "resistant-treatment",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "provincial-latvia",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
