{ "cells": [ { "cell_type": "markdown", "metadata": { "id": "v3dqV53dowfA" }, "source": [ "Installs" ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "Gq2cQlRrzlz8", "outputId": "da9558fd-aaa2-496a-e81f-f891eeacd93f", "scrolled": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", "Requirement already satisfied: transformers in /usr/local/lib/python3.9/dist-packages (4.28.1)\n", "Requirement already satisfied: filelock in /usr/local/lib/python3.9/dist-packages (from transformers) (3.11.0)\n", "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.9/dist-packages (from transformers) (23.1)\n", "Requirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.9/dist-packages (from transformers) (4.65.0)\n", "Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.9/dist-packages (from transformers) (1.22.4)\n", "Requirement already satisfied: huggingface-hub<1.0,>=0.11.0 in /usr/local/lib/python3.9/dist-packages (from transformers) (0.14.0)\n", "Requirement already satisfied: requests in /usr/local/lib/python3.9/dist-packages (from transformers) (2.27.1)\n", "Requirement already satisfied: tokenizers!=0.11.3,<0.14,>=0.11.1 in /usr/local/lib/python3.9/dist-packages (from transformers) (0.13.3)\n", "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.9/dist-packages (from transformers) (6.0)\n", "Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.9/dist-packages (from transformers) (2022.10.31)\n", "Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.9/dist-packages (from huggingface-hub<1.0,>=0.11.0->transformers) (4.5.0)\n", "Requirement already satisfied: fsspec in /usr/local/lib/python3.9/dist-packages (from huggingface-hub<1.0,>=0.11.0->transformers) (2023.4.0)\n", "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.9/dist-packages (from requests->transformers) (1.26.15)\n", "Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.9/dist-packages (from requests->transformers) (2.0.12)\n", "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.9/dist-packages (from requests->transformers) (3.4)\n", "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.9/dist-packages (from requests->transformers) (2022.12.7)\n", "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", "Requirement already satisfied: datasets in /usr/local/lib/python3.9/dist-packages (2.11.0)\n", "Requirement already satisfied: aiohttp in /usr/local/lib/python3.9/dist-packages (from datasets) (3.8.4)\n", "Requirement already satisfied: packaging in /usr/local/lib/python3.9/dist-packages (from datasets) (23.1)\n", "Requirement already satisfied: dill<0.3.7,>=0.3.0 in /usr/local/lib/python3.9/dist-packages (from datasets) (0.3.6)\n", "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.9/dist-packages (from datasets) (6.0)\n", "Requirement already satisfied: requests>=2.19.0 in /usr/local/lib/python3.9/dist-packages (from datasets) (2.27.1)\n", "Requirement already satisfied: xxhash in /usr/local/lib/python3.9/dist-packages (from datasets) (3.2.0)\n", "Requirement already satisfied: huggingface-hub<1.0.0,>=0.11.0 in /usr/local/lib/python3.9/dist-packages (from 
datasets) (0.14.0)\n", "Requirement already satisfied: pandas in /usr/local/lib/python3.9/dist-packages (from datasets) (1.5.3)\n", "Requirement already satisfied: tqdm>=4.62.1 in /usr/local/lib/python3.9/dist-packages (from datasets) (4.65.0)\n", "Requirement already satisfied: fsspec[http]>=2021.11.1 in /usr/local/lib/python3.9/dist-packages (from datasets) (2023.4.0)\n", "Requirement already satisfied: responses<0.19 in /usr/local/lib/python3.9/dist-packages (from datasets) (0.18.0)\n", "Requirement already satisfied: multiprocess in /usr/local/lib/python3.9/dist-packages (from datasets) (0.70.14)\n", "Requirement already satisfied: pyarrow>=8.0.0 in /usr/local/lib/python3.9/dist-packages (from datasets) (9.0.0)\n", "Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.9/dist-packages (from datasets) (1.22.4)\n", "Requirement already satisfied: charset-normalizer<4.0,>=2.0 in /usr/local/lib/python3.9/dist-packages (from aiohttp->datasets) (2.0.12)\n", "Requirement already satisfied: aiosignal>=1.1.2 in /usr/local/lib/python3.9/dist-packages (from aiohttp->datasets) (1.3.1)\n", "Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.9/dist-packages (from aiohttp->datasets) (1.3.3)\n", "Requirement already satisfied: yarl<2.0,>=1.0 in /usr/local/lib/python3.9/dist-packages (from aiohttp->datasets) (1.9.1)\n", "Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.9/dist-packages (from aiohttp->datasets) (6.0.4)\n", "Requirement already satisfied: async-timeout<5.0,>=4.0.0a3 in /usr/local/lib/python3.9/dist-packages (from aiohttp->datasets) (4.0.2)\n", "Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.9/dist-packages (from aiohttp->datasets) (23.1.0)\n", "Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.9/dist-packages (from huggingface-hub<1.0.0,>=0.11.0->datasets) (4.5.0)\n", "Requirement already satisfied: filelock in /usr/local/lib/python3.9/dist-packages (from huggingface-hub<1.0.0,>=0.11.0->datasets) (3.11.0)\n", "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.9/dist-packages (from requests>=2.19.0->datasets) (2022.12.7)\n", "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.9/dist-packages (from requests>=2.19.0->datasets) (3.4)\n", "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.9/dist-packages (from requests>=2.19.0->datasets) (1.26.15)\n", "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.9/dist-packages (from pandas->datasets) (2022.7.1)\n", "Requirement already satisfied: python-dateutil>=2.8.1 in /usr/local/lib/python3.9/dist-packages (from pandas->datasets) (2.8.2)\n", "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.9/dist-packages (from python-dateutil>=2.8.1->pandas->datasets) (1.16.0)\n" ] } ], "source": [ "!pip install transformers\n", "!pip install datasets " ] }, { "cell_type": "markdown", "metadata": { "id": "ATWxWZI-o1f0" }, "source": [ "Imports" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "id": "QgwTmd-BeNl-" }, "outputs": [], "source": [ "from datasets import load_dataset\n", "import torch\n", "from tqdm import tqdm\n", "from transformers import RobertaTokenizer, RobertaConfig, RobertaModel\n", "from transformers import RobertaConfig, RobertaTokenizer, RobertaForMaskedLM, pipeline, FillMaskPipeline\n" ] }, { "cell_type": "markdown", "metadata": { "id": "lRdYOKbVcfAd" }, "source": [ "#Evaluate Models With 
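{ "cell_type": "markdown", "metadata": {}, "source": [ "Optionally fix the random seed so the mask positions sampled below are reproducible across runs; this mirrors the commented-out seed in the masking cell further down and is purely optional." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import random\n", "# Optional: uncomment for reproducible mask positions in masked_code below.\n", "# random.seed(42)" ] },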
Accuracy\n", "\n" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 87, "referenced_widgets": [ "1215458d6e6e4c08a106d065440e04bb", "f3c940f54cd84e839ffcdc56e86f5ccd", "81130b6c67794e409d4a4808d995d778", "326ae8ed30094a30b4911e98c96858fe", "d1d636fedf5447a18c7b9edaec33ffb0", "1ffb5c0070db4b5abd1f70b0b04e11e4", "1afa7ca9c29d43589536ccdfaf0d369f", "aa4b1d988b2148b8b7a40bacb4c64e6c", "6c3650a634274e518d9125d89afb75ac", "9b8e16c408c846f092edf7b9050c8ad2", "ad755941d549436a8f2d8cb371368771" ] }, "id": "W89z1FepeGx5", "outputId": "2ee27614-39fb-441f-8ad0-9226ad82461b", "scrolled": true }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "WARNING:datasets.builder:Found cached dataset parquet (/root/.cache/huggingface/datasets/martiwey___parquet/martiwey--code-search-net-clean-d9d3ed83390f9689/0.0.0/2a3b91fbd88a2c90d1dbbb32b460cf621d31bd5b05b934492fdef7d8d6f236ec)\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "1215458d6e6e4c08a106d065440e04bb", "version_major": 2, "version_minor": 0 }, "text/plain": [ " 0%| | 0/1 [00:00.print(\\\"hello world\\\")\")\n", "# print(results)\n", "# Print the results\n", "# for result in results:\n", "# print(result['sequence'], result['score'])" ] }, { "cell_type": "code", "execution_count": 83, "metadata": { "id": "MBnSGGdCk3_C" }, "outputs": [], "source": [ "import random\n", "# random.seed(42)\n", "\n", "def masked_code(func_code_tokens):\n", " code = func_code_tokens\n", " print(func_code_tokens)\n", " while True:\n", " random_idx = random.randint(0, len(code) - 1)\n", " code_masked_token = code[random_idx]\n", " if(len(code_masked_token.split(' ')) <= 1):\n", " break\n", " code[random_idx] = ''\n", " code_masked = \" \".join(code)\n", " return code_masked, code_masked_token" ] }, { "cell_type": "code", "execution_count": 26, "metadata": { "id": "JzSKKDsUGj91" }, "outputs": [], "source": [ "def predict_mask(\n", " masked_code:str,\n", " mask_pipeline=fill_mask\n", "):\n", " outputs = fill_mask(masked_code)\n", " return outputs" ] }, { "cell_type": "code", "execution_count": 27, "metadata": { "id": "Xr-A92Lo2tST" }, "outputs": [], "source": [ "def evaluation(outputs, masked_tokens):\n", " count = 0\n", " for idx, dic in enumerate(outputs):\n", " print(masked_tokens[idx])\n", " for dic_idx in range(len(dic)):\n", " print(idx,\" \",dic[dic_idx][\"token_str\"])\n", " if(dic[dic_idx][\"token_str\"].strip().lower() == masked_tokens[idx].strip().lower()):\n", " count += 1\n", " break\n", "\n", "\n", " return count / len(masked_tokens)\n", "\n", " # for key, values in dic.items():\n", " # print(key, \":- \" , values)\n", " " ] }, { "cell_type": "code", "execution_count": 79, "metadata": { "id": "8cdxbsx7yuf8" }, "outputs": [], "source": [ "# '''for(int i = 0; i < 1321 ; i++){ \n", "# i += \n", "# } '''\n", "\n", "\n", "def main_eval(dataset_code, num_datapoints):\n", " size = num_datapoints\n", " count = 0\n", " codes = []\n", " masked_tokens = []\n", " outputs = []\n", " # languages = ['java']\n", " for idx in tqdm(range(num_datapoints)):\n", " idx_data = dataset_code[idx]\n", " # print('prgramming language: ', idx_data['language'])\n", "\n", " masked_data = masked_code(idx_data[\"func_code_tokens\"])\n", " print(type(masked_data))\n", " # print(f'True token: {masked_token}')\n", " # print(len(code))\n", " if((masked_data is not None) and (len(masked_data) <= 1700)):\n", " # print('passed')\n", " code, masked_token = masked_data\n", " output = 
predict_mask(code)\n", " codes.append(code)\n", " masked_tokens.append(masked_token)\n", " outputs.append(output)\n", " # true_token_id = tokenizer.encode(masked_token, return_tensors='pt')\n", " # print(true_token_id, true_token_id.shape)\n", " # break\n", " \n", "\n", " return codes, masked_tokens, outputs\n", "\n", "# \"\"\"\n", "# if(evaluation(outputs, masked_token)):\n", "# count += 1\n", " \n", "# accuracy = (count/size)\n", "# print(\"\\naccuracy: \", accuracy)\n", "# \"\"\"" ] }, { "cell_type": "code", "execution_count": 85, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "ZbNPLnCdw_H7", "outputId": "1c5abeea-188d-42a5-cc34-328162438b97" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "['def', 'train', '(', 'train_dir', ',', 'model_save_path', '=', 'None', ',', 'n_neighbors', '=', 'None', ',', 'knn_algo', '=', '', ',', 'verbose', '=', 'False', ')', ':', 'X', '=', '[', ']', 'y', '=', '[', ']', '# Loop through each person in the training set', 'for', 'class_dir', 'in', 'os', '.', 'listdir', '(', 'train_dir', ')', ':', 'if', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'os', '.', 'path', '.', 'join', '(', 'train_dir', ',', 'class_dir', ')', ')', ':', 'continue', '# Loop through each training image for the current person', 'for', 'img_path', 'in', 'image_files_in_folder', '(', 'os', '.', 'path', '.', 'join', '', 'train_dir', ',', 'class_dir', ')', ')', ':', 'image', '=', 'face_recognition', '.', 'load_image_file', '(', 'img_path', ')', 'face_bounding_boxes', '=', 'face_recognition', '.', 'face_locations', '(', 'image', ')', 'if', 'len', '(', 'face_bounding_boxes', ')', '!=', '1', ':', '# If there are no people (or too many people) in a training image, skip the image.', 'if', '', ':', 'print', '(', '\"Image {} not suitable for training: {}\"', '.', 'format', '(', 'img_path', ',', '\"Didn\\'t find a face\"', 'if', 'len', '(', 'face_bounding_boxes', ')', '<', '1', 'else', '\"Found more than one face\"', ')', ')', 'else', ':', '', 'X', '.', 'append', '(', 'face_recognition', '.', 'face_encodings', '(', 'image', ',', 'known_face_locations', '=', 'face_bounding_boxes', ')', '[', '0', ']', ')', 'y', '.', 'append', '(', 'class_dir', ')', '# Determine how many neighbors to use for weighting in the KNN classifier', 'if', 'n_neighbors', 'is', 'None', ':', 'n_neighbors', '=', 'int', '(', 'round', '(', 'math', '.', 'sqrt', '(', 'len', '(', 'X', ')', ')', ')', ')', 'if', 'verbose', '', 'print', '(', '\"Chose n_neighbors automatically:\"', ',', 'n_neighbors', ')', '', 'knn_clf', '', 'neighbors', '', 'KNeighborsClassifier', '(', 'n_neighbors', '=', 'n_neighbors', ',', 'algorithm', '=', 'knn_algo', ',', 'weights', '=', \"'distance'\", ')', 'knn_clf', '.', 'fit', '(', 'X', ',', 'y', ')', '# Save the trained KNN classifier', 'if', 'model_save_path', '', 'not', 'None', ':', 'with', 'open', '(', 'model_save_path', ',', \"'wb'\", ')', 'as', 'f', ':', 'pickle', '.', 'dump', '(', 'knn_clf', ',', 'f', ')', 'return', 'knn_clf']\n" ] } ], "source": [ "print(dataset_python[0]['func_code_tokens'])" ] }, { "cell_type": "code", "execution_count": 80, "metadata": { "id": "xNFO_yZQxhiz" }, "outputs": [], "source": [ "masked_code(dataset_python[0]['func_code_tokens'])" ] }, { "cell_type": "code", "execution_count": 84, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "pW4K5836PPj5", "outputId": "455dd4a7-08f5-47e6-d700-141cbea44dda" }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "100%|██████████| 5/5 [00:00<00:00, 
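{ "cell_type": "markdown", "metadata": {}, "source": [ "Each fill-mask call on a one-mask string returns a list of candidate dicts with `score`, `token`, `token_str`, and `sequence` keys; that is the structure `evaluation` above indexes. A minimal shape check (assumes the `fill_mask` pipeline reconstructed above):" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Inspect the output shape on a toy one-mask snippet (sketch; assumes fill_mask above).\n", "demo = fill_mask(\"def add ( a , b ) : <mask> a + b\")\n", "for cand in demo:\n", "    print(round(cand['score'], 4), repr(cand['token_str']))" ] },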
{ "cell_type": "code", "execution_count": 84, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "pW4K5836PPj5", "outputId": "455dd4a7-08f5-47e6-d700-141cbea44dda" }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "100%|██████████| 5/5 [00:00<00:00, 10082.46it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "(masked func_code_tokens lists for the 5 sampled functions; output truncated)\n" ] } ], "source": [ "codes, masked_tokens, outputs = main_eval(dataset_python, 5)" ] },
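{ "cell_type": "markdown", "metadata": {}, "source": [ "A toy check of the accuracy metric on hand-written candidate lists (hypothetical values, not model output): an example is a hit when the true token appears anywhere in its candidate list, so top-5 accuracy is more forgiving than top-1." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Hypothetical candidates: the first example hits at rank 2, the second misses.\n", "toy_outputs = [\n", "    [{'token_str': ' ('}, {'token_str': ' return'}],\n", "    [{'token_str': ' ='}],\n", "]\n", "toy_tokens = ['return', ':']\n", "print(evaluation(toy_outputs, toy_tokens))  # expected: 0.5" ] },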
{ "cell_type": "code", "execution_count": 37, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "Mz9HN_oFa3JJ", "outputId": "202ca4fa-6442-4e79-b8c8-938fa2e601f8" }, "outputs": [ { "data": { "text/plain": [ "{'score': 0.0025508219841867685,\n", " 'token': 41006,\n", " 'token_str': ' ((',\n", " 'sequence': ' def flatMapValues ( self, f ) : flat_map_fn = lambda kv : ( ((kv [ 0 ], x ) for x in f ( kv [ 1 ] ) ) return self. flatMap ( flat_map_fn, preservesPartitioning = True )'}" ] }, "execution_count": 37, "metadata": {}, "output_type": "execute_result" } ], "source": [ "outputs[1]" ] }, { "cell_type": "code", "execution_count": 44, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "_xx6ZsHl03VS", "outputId": "4b22be08-beb7-4270-86b5-d73354b14e48" }, "outputs": [ { "data": { "text/plain": [ "97" ] }, "execution_count": 44, "metadata": {}, "output_type": "execute_result" } ], "source": [ "len(masked_tokens)" ] }, { "cell_type": "code", "execution_count": 45, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "UF3Y4_Lf1ZaT", "outputId": "f98ad509-256f-40d0-8d22-44e2fefa49eb" }, "outputs": [ { "data": { "text/plain": [ "97" ] }, "execution_count": 45, "metadata": {}, "output_type": "execute_result" } ], "source": [ "len(outputs)" ] }, { "cell_type": "code", "execution_count": 49, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "biZNUD_0kVbz", "outputId": "57549f94-ef4f-4cee-c0c8-7b07ce487a2e" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "# Create and train the KNN classifier [0, 849, 21384, 8, 2341, 5, 229, 20057, 1380, 24072, 2]\n", "[[{'score': 0.9999772310256958, 'token': 36, 'token_str': ' (', 'sequence': ' def train ( train_dir, model_save_path = None, ... (output truncated: 5 mask positions x 5 candidates each, every candidate repeating the full function text) ...'}]]\n" ] } ], "source": [ "print(masked_tokens[0], ' ', tokenizer.encode(masked_tokens[0]))\n", "print(outputs[0])" ] },
dump ( knn_clf, f ) return knn_clf'}]]\n" ] } ], "source": [ "print(masked_tokens[0], ' ', tokenizer.encode(masked_tokens[0]))\n", "print(outputs[0])" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 347 }, "id": "nRGLMlvna3JJ", "outputId": "ec0040c3-b04e-4783-ae8e-e3b7defbb454" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "(\n" ] }, { "ename": "KeyError", "evalue": "ignored", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mKeyError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mevaluation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmasked_tokens\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0;31m# for idx, dic in enumerate(outputs):\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;31m# print(idx, \"- \", dic)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m\u001b[0m in \u001b[0;36mevaluation\u001b[0;34m(outputs, masked_tokens)\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmasked_tokens\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0midx\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mdic_idx\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdic\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 6\u001b[0;31m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0midx\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\" \"\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mdic\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mdic_idx\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"token_str\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 7\u001b[0m \u001b[0;32mif\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdic\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mdic_idx\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"token_str\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstrip\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlower\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mmasked_tokens\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0midx\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstrip\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlower\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0mcount\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mKeyError\u001b[0m: 0" ] } ], "source": [ "evaluation(outputs, masked_tokens)\n", "# for idx, dic in enumerate(outputs):\n", "# print(idx, \"- \", dic)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { 
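"base_uri": "https://localhost:8080/" }, "id": "LICXJBhea3JJ", "outputId": "61d4bf4c-7837-439a-9be7-1c4c0be4bbcb" }, "outputs": [ { "data": { "text/plain": [ "list" ] }, "execution_count": 20, "metadata": {}, "output_type": "execute_result" } ], "source": [ "type(outputs)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The `KeyError: 0` above comes from the shape of the pipeline output: `FillMaskPipeline` returns a flat list of dicts for a single masked input, but a list of lists of dicts when given several inputs. A hedged sketch that normalizes the shape before comparing predictions; `evaluation_fixed` is a hypothetical name, not the original function:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Hedged sketch, not the original evaluation(): normalize the fill-mask output\n", "# shape, then count how often a top-k prediction matches the masked token.\n", "def evaluation_fixed(outputs, masked_tokens):\n", "    count = 0\n", "    for idx, preds in enumerate(outputs):\n", "        if isinstance(preds, dict):  # single prediction -> wrap it in a list\n", "            preds = [preds]\n", "        for pred in preds:\n", "            if pred[\"token_str\"].strip().lower() == masked_tokens[idx].strip().lower():\n", "                count += 1\n", "                break\n", "    print(count, \"/\", len(masked_tokens))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": {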
"base_uri": "https://localhost:8080/" }, "id": "LICXJBhea3JJ", "outputId": "61d4bf4c-7837-439a-9be7-1c4c0be4bbcb" }, "outputs": [ { "data": { "text/plain": [ "list" ] }, "execution_count": 20, "metadata": {}, "output_type": "execute_result" } ], "source": [ "type(outputs)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "KXgIhZnna3JK", "outputId": "4a1385a5-3ef2-4d31-b04b-fafeeef6c5d0" }, "outputs": [ { "data": { "text/plain": [ "[0, 15110, 2]" ] }, "execution_count": 19, "metadata": {}, "output_type": "execute_result" } ], "source": [ "tokenizer.encode('public')" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "IeIqSJZKa3JK", "outputId": "9204882f-e19d-4e58-c9bf-b790bd58e679" }, "outputs": [ { "data": { "text/plain": [ "False" ] }, "execution_count": 82, "metadata": {}, "output_type": "execute_result" } ], "source": [ "['<', '}', '!'] == ['<', 'w', '!']" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "JGXE5Ju9a3JK", "outputId": "d024223a-ca6f-4d1e-dd4e-74194c2c7823" }, "outputs": [ { "data": { "text/plain": [ "2" ] }, "execution_count": 85, "metadata": {}, "output_type": "execute_result" } ], "source": [ "torch.sum(torch.tensor([2, 3, 4]) == torch.tensor([2, 5, 4])).item()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "U2LNX3fya3JL" }, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "OcJbMOWurI5U" }, "outputs": [], "source": [] }, { "cell_type": "markdown", "metadata": { "id": "Bej5OQz4rMIt" }, "source": [ "#Prepare Data for Fine-Tune" ] }, { "cell_type": "code", "execution_count": 1, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 81, "referenced_widgets": [ "73abf7199d3e4853a12a1458d54d5cc9", "3328747c3bb64797ac036d795bce8fba", "3a1c027ff6a841c3a88eb2e9c19af5c0", "fc48cabbba9c4563b1cb90d04147f007", "159774f73b8047b1ae9f26f9028f31f8", "b2b6b2e728434e6382c455957f330b57", "af5c4c2bfd6f4388bef785cfaa728947", "704bc211850948b1a9ee9dd04d01d0b8", "a0a7cb292308478ab4dc55fb7ea69b80", "b5382a062a91425b8a983ec5c69ba5d9", "be635bc6e77e415da160e5173e244956", "b904e67880f24621a454ccdb10df3e4a", "bcd56cc08c46423bb2925851797b9e8c", "67a39dc45a1341519c202793c4a43cc0", "58ae791121e34d7096c2b5c412cc3b09", "484158b33883468ebca6949fabe968fa", "c4b4fcd531d74b9cabaaaf08019061b2", "a826ec21ed994b1eae0dee93034da934", "5be5a44f790f425cb2937d29c5ff5539", "eea4e0153bb94d5e9d6a2cb885b41192", "610332412caf423fb56d4302abac5235", "186994b97c4442f2ba97034c2f71930d" ] }, "id": "BFxdZrEfrRXD", "outputId": "ba128cfb-c01d-4aa3-c5bb-5842d7b8ac1f" }, "outputs": [], "source": [ "from datasets import load_dataset, Dataset\n", "from torch.utils.data import DataLoader\n", "\n", "#Specify SQL as our target language -- Stream is necssary as it provides a faster way to to get part of the dataset \n", "dataset = load_dataset(\"codeparrot/github-code\", streaming=True, languages = ['SQL'])" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "ITRYldDGsRtv", "outputId": "bdb0c976-0e4f-48ac-8987-75446b2dea6f" }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "351it [01:45, 5.69it/s]" ] } ], "source": [ "dataset_sql_list = []\n", "\n", "\n", "for idx, element in enumerate(tqdm(iter(dataset['train']))):\n", " dataset_sql_list.append(element)\n", " if idx == 1_000: #Change to be lower for testing \n", " break" ] }, { 
"cell_type": "code", "execution_count": null, "metadata": { "id": "AMMLSNeJsnv1" }, "outputs": [], "source": [ "#covert list to Dataset\n", "code_list = []\n", "\n", "for d in dataset_sql_list:\n", " code_list.append(d['code'])\n", "\n", "output_dict = {'code': code_list}\n", "\n", "dataset_sql = Dataset.from_dict(output_dict)" ] }, { "cell_type": "code", "execution_count": 16, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 222, "referenced_widgets": [ "0a8db52b1b1c44658ceaec546adcdf92", "5392f2768260493baab51d047fe5c4ca", "abd3ee4efbf74d2eae23ea502bb2b48f", "98ee0dd1ec81423ea4bdd1937552eee8", "95988137ffaa479396d24efb98e0bd54", "9a3622953c8548bebcd0de38935c2d05", "6416ab65faed4c43a628d31e79bfbbf1", "9902a7618c75439997d3e3b7656c77c0", "c4fc0f2132f5466ab1e481f78d96d12a", "581a792d3c5841cb8694527e359ea01b", "69ba53d684024c8bada63c016454ed25" ] }, "id": "zZtvrus3sZwN", "outputId": "f4954d1b-488f-4fbc-c4e5-a5581c3f7107" }, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "0a8db52b1b1c44658ceaec546adcdf92", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Map: 0%| | 0/1001 [00:00 512). Running this sequence through the model will result in indexing errors\n" ] }, { "ename": "NameError", "evalue": "ignored", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0mtokenize_function\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatched\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mremove_columns\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"code\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 7\u001b[0m )\n\u001b[0;32m----> 8\u001b[0;31m \u001b[0mtokenized_datasets\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", "\u001b[0;31mNameError\u001b[0m: name 'tokenized_datasets' is not defined" ] } ], "source": [ "def tokenize_function(data):\n", " return tokenizer(data[\"code\"])\n", "\n", "\n", "tokenized_datasets_sql = dataset_sql.map(\n", " tokenize_function, batched=True, remove_columns=[\"code\"]\n", ")" ] }, { "cell_type": "code", "execution_count": 22, "metadata": { "id": "SoqWO2PPvkxg" }, "outputs": [], "source": [ "chunk_size = 128" ] }, { "cell_type": "code", "execution_count": 23, "metadata": { "id": "JC5Xe1OjuKMh" }, "outputs": [], "source": [ "def group_texts(examples):\n", " # Concatenate all texts\n", " concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}\n", " # Compute length of concatenated texts\n", " total_length = len(concatenated_examples[list(examples.keys())[0]])\n", " # We drop the last chunk if it's smaller than chunk_size\n", " total_length = (total_length // chunk_size) * chunk_size\n", " # Split by chunks of max_len\n", " result = {\n", " k: [t[i : i + chunk_size] for i in range(0, total_length, chunk_size)]\n", " for k, t in concatenated_examples.items()\n", " }\n", " # Create a new labels column\n", " result[\"labels\"] = result[\"input_ids\"].copy()\n", " return result" ] }, { "cell_type": "code", "execution_count": 24, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 88, "referenced_widgets": [ "6a46fde25f1c41b2bdfc63c4ab0c1f62", "f01c781b3d1f45abb2ad6f28fd522446", 
"18970a099d8644b08c4e9b922bcd033c", "2ccaa71a83e04a059c8b951e9c6cc435", "e3474e3bd368442eadf2a428bafb2c5b", "ef5f9ab0eccb42e882a71cf35202912f", "371af51396de42fdaf214bbd1717cb71", "0a90f378430f4ecb8b70e01112300a28", "5b5f63f10cb04e9d96e843d21d2e8f54", "145b730cd2594466bb56a42a11604cb5", "8becb928304c4be3b635df7188d41e7f" ] }, "id": "0lSUSsDPvFat", "outputId": "a37d46cb-fc3b-42c6-d2d6-f26345051011" }, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "6a46fde25f1c41b2bdfc63c4ab0c1f62", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Map: 0%| | 0/1001 [00:00 \u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0mlogging_steps\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdownsampled_dataset\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"train\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m//\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 9\u001b[0;31m training_args = TrainingArguments(\n\u001b[0m\u001b[1;32m 10\u001b[0m \u001b[0moutput_dir\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m\"ABSH/codebert-finetuned-sql\"\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[0moverwrite_output_dir\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.9/dist-packages/transformers/training_args.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, output_dir, overwrite_output_dir, do_train, do_eval, do_predict, evaluation_strategy, prediction_loss_only, per_device_train_batch_size, per_device_eval_batch_size, per_gpu_train_batch_size, per_gpu_eval_batch_size, gradient_accumulation_steps, eval_accumulation_steps, eval_delay, learning_rate, weight_decay, adam_beta1, adam_beta2, adam_epsilon, max_grad_norm, num_train_epochs, max_steps, lr_scheduler_type, warmup_ratio, warmup_steps, log_level, log_level_replica, log_on_each_node, logging_dir, logging_strategy, logging_first_step, logging_steps, logging_nan_inf_filter, save_strategy, save_steps, save_total_limit, save_safetensors, save_on_each_node, no_cuda, use_mps_device, seed, data_seed, jit_mode_eval, use_ipex, bf16, fp16, fp16_opt_level, half_precision_backend, bf16_full_eval, fp16_full_eval, tf32, local_rank, xpu_backend, tpu_num_cores, tpu_metrics_debug, debug, dataloader_drop_last, eval_steps, dataloader_num_workers, past_index, run_name, disable_tqdm, remove_unused_columns, label_names, load_best_model_at_end, metric_for_best_model, greater_is_better, ignore_data_skip, sharded_ddp, fsdp, fsdp_min_num_params, fsdp_config, fsdp_transformer_layer_cls_to_wrap, deepspeed, label_smoothing_factor, optim, optim_args, adafactor, group_by_length, length_column_name, report_to, ddp_find_unused_parameters, ddp_bucket_cap_mb, dataloader_pin_memory, skip_memory_metr...\n", "\u001b[0;32m/usr/local/lib/python3.9/dist-packages/transformers/training_args.py\u001b[0m in \u001b[0;36m__post_init__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 1261\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfp16\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfp16_full_eval\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 
1262\u001b[0m ):\n\u001b[0;32m-> 1263\u001b[0;31m raise ValueError(\n\u001b[0m\u001b[1;32m 1264\u001b[0m \u001b[0;34m\"FP16 Mixed precision training with AMP or APEX (`--fp16`) and FP16 half precision evaluation\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1265\u001b[0m \u001b[0;34m\" (`--fp16_full_eval`) can only be used on CUDA devices.\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mValueError\u001b[0m: FP16 Mixed precision training with AMP or APEX (`--fp16`) and FP16 half precision evaluation (`--fp16_full_eval`) can only be used on CUDA devices." ] } ], "source": [ "from transformers import TrainingArguments\n", "\n", "#GPU Needed\n", "\n", "batch_size = 64\n", "# Show the training loss with every epoch\n", "logging_steps = len(downsampled_dataset[\"train\"]) // batch_size\n", "\n", "training_args = TrainingArguments(\n", " output_dir=\"ABSH/codebert-finetuned-sql\",\n", " overwrite_output_dir=True,\n", " evaluation_strategy=\"epoch\",\n", " learning_rate=2e-5,\n", " weight_decay=0.01,\n", " per_device_train_batch_size=batch_size,\n", " per_device_eval_batch_size=batch_size,\n", " push_to_hub=True,\n", " fp16=True,\n", " logging_steps=logging_steps,\n", ")" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "bhlQOLx4yNix" }, "outputs": [], "source": [ "from transformers import Trainer\n", "\n", "trainer = Trainer(\n", " model=model_to_finetune,\n", " args=training_args,\n", " train_dataset=downsampled_dataset[\"train\"],\n", " eval_dataset=downsampled_dataset[\"test\"],\n", " data_collator=data_collator,\n", " tokenizer=tokenizer,\n", ")" ] }, { "cell_type": "code", "execution_count": 34, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 55 }, "id": "CEc92HLB92oz", "outputId": "742436b6-9185-4dcb-948c-c65974a28562" }, "outputs": [ { "data": { "text/html": [ "\n", "
\n", " \n", " \n", " [1/1 : < :]\n", "
\n", " " ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ ">>> Perplexity: 1.01\n" ] } ], "source": [ "import math\n", "\n", "eval_results = trainer.evaluate()\n", "print(f\">>> Perplexity: {math.exp(eval_results['eval_loss']):.2f}\")" ] }, { "cell_type": "code", "execution_count": 35, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 281 }, "id": "KBxxOubn97IK", "outputId": "7b149169-544f-4f33-85b5-943ea20b9b96" }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/usr/local/lib/python3.9/dist-packages/transformers/optimization.py:391: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n", " warnings.warn(\n" ] }, { "data": { "text/html": [ "\n", "
\n", " \n", " \n", " [15/15 00:01, Epoch 3/3]\n", "
\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
EpochTraining LossValidation Loss
11.2480001.709759
21.3682000.358540
31.1891001.373444

" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "\n", "

\n", " \n", " \n", " [1/1 00:20]\n", "
\n", " " ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [ "TrainOutput(global_step=15, training_loss=1.2684219360351563, metrics={'train_runtime': 1.7254, 'train_samples_per_second': 17.387, 'train_steps_per_second': 8.693, 'total_flos': 1974490974720.0, 'train_loss': 1.2684219360351563, 'epoch': 3.0})" ] }, "execution_count": 35, "metadata": {}, "output_type": "execute_result" } ], "source": [ "trainer.train()" ] }, { "cell_type": "code", "execution_count": 36, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 55 }, "id": "LLvr9-rGLs2v", "outputId": "e705fce3-a59a-4f2e-b9d9-486f9bcd11b5" }, "outputs": [ { "data": { "text/html": [ "\n", "
\n", " \n", " \n", " [1/1 : < :]\n", "
\n", " " ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ ">>> Perplexity: 2.49\n" ] } ], "source": [ "eval_results = trainer.evaluate()\n", "print(f\">>> Perplexity: {math.exp(eval_results['eval_loss']):.2f}\")" ] }, { "cell_type": "code", "execution_count": 37, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 871 }, "id": "-el_LASqABAL", "outputId": "343ae017-db21-45ca-be1f-7a23f2f52967" }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "batch response: Authorization error.\n", "error: failed to push some refs to 'https://user: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@huggingface.co/ABSH/codebert-finetuned-sql'\n", "\n", "WARNING:huggingface_hub.repository:batch response: Authorization error.\n", "error: failed to push some refs to 'https://user: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@huggingface.co/ABSH/codebert-finetuned-sql'\n", "\n" ] }, { "ename": "OSError", "evalue": "ignored", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mCalledProcessError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m/usr/local/lib/python3.9/dist-packages/huggingface_hub/repository.py\u001b[0m in \u001b[0;36mgit_push\u001b[0;34m(self, upstream, blocking, auto_lfs_prune)\u001b[0m\n\u001b[1;32m 1098\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mreturn_code\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1099\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0msubprocess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mCalledProcessError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mreturn_code\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mprocess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moutput\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mstdout\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstderr\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mstderr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1100\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mCalledProcessError\u001b[0m: Command '['git', 'push', '--set-upstream', 'origin', 'main']' returned non-zero exit status 1.", "\nDuring handling of the above exception, another exception occurred:\n", "\u001b[0;31mOSError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mtrainer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpush_to_hub\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", "\u001b[0;32m/usr/local/lib/python3.9/dist-packages/transformers/trainer.py\u001b[0m in \u001b[0;36mpush_to_hub\u001b[0;34m(self, commit_message, blocking, **kwargs)\u001b[0m\n\u001b[1;32m 3659\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpush_in_progress\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3660\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 3661\u001b[0;31m git_head_commit_url = self.repo.push_to_hub(\n\u001b[0m\u001b[1;32m 3662\u001b[0m \u001b[0mcommit_message\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcommit_message\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mblocking\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mblocking\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mauto_lfs_prune\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3663\u001b[0m )\n", "\u001b[0;32m/usr/local/lib/python3.9/dist-packages/huggingface_hub/repository.py\u001b[0m in \u001b[0;36mpush_to_hub\u001b[0;34m(self, commit_message, blocking, clean_ok, auto_lfs_prune)\u001b[0m\n\u001b[1;32m 1305\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgit_add\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mauto_lfs_track\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1306\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgit_commit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcommit_message\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1307\u001b[0;31m return self.git_push(\n\u001b[0m\u001b[1;32m 1308\u001b[0m \u001b[0mupstream\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34mf\"origin {self.current_branch}\"\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1309\u001b[0m \u001b[0mblocking\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mblocking\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.9/dist-packages/huggingface_hub/repository.py\u001b[0m in \u001b[0;36mgit_push\u001b[0;34m(self, upstream, blocking, auto_lfs_prune)\u001b[0m\n\u001b[1;32m 1100\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1101\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0msubprocess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mCalledProcessError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mexc\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1102\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mEnvironmentError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mexc\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstderr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1103\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1104\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mblocking\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mOSError\u001b[0m: batch response: Authorization error.\nerror: failed to push some refs to 'https://user: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@huggingface.co/ABSH/codebert-finetuned-sql'\n" ] } ], "source": [ "trainer.push_to_hub()" ] }, {
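"cell_type": "markdown", "metadata": {}, "source": [ "The push above failed with an authorization error, which usually means no write-access token is configured in the environment. A hedged sketch of one way to fix it: log in first with `notebook_login()` from `huggingface_hub` (using a token that has write access), then retry `trainer.push_to_hub()`." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Hedged sketch: authenticate before pushing; notebook_login() prompts for a\n", "# Hugging Face token with write access.\n", "from huggingface_hub import notebook_login\n", "\n", "notebook_login()" ] }, { "cell_type": "markdown", "metadata": { "id": "PaOIKdWHcVKr" }, "source": [ "#Fine-Tune Model with Acceleration" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 81, "referenced_widgets": [ "d63668c76e494164bfe3a97f0b3fbc35", "8aed8c83c26a4fc08c0226e0afe142d1", "aa9e5dc34987457ba8132dc5faf197c3", "47d0fc4344cf4690a8c91743f4a06ef1", "7336e16c609f40e0a780b74553af7149", "69e3226d0fea4c1790bfef7435e19af4", "f36f45f89dd24fffb02fa6f441af365d", "f15ef3331a6c441d828e0a63609ab1ce", "787706b07f3d46aa98acae65d768a302", "f08ef47149e14dc1850be1dd2213dea6", "d283cea383b94525ae10e62d50f82dba", "edfd968788b94f798b81abca4eb7f673", "c44b9367f99b41f1bab44fe0ffef053c", "871984107d39457d996c84211ae26fec", "6680de217b6b4e43989e0c92ab10aa18", "da71c7f787ad4adca381a7dd7e4eeead", "17aaa3fcd9ce497aae02a461fa5ed057", "f00a1407959c4343b814cab0216194b7", "bd38d2f6d18347e2b95b641435b1d71e", "0efda4d9914840e9b5a99a0e0e62986f",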
"7b362ec7255f4ef89c2746db11e07417", "dafa31af41ef4c4eae709ddd985458e0" ] }, "id": "btrWYYJzq14M", "outputId": "0858acb9-962d-43e5-8435-3db596b03c65" }, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "d63668c76e494164bfe3a97f0b3fbc35", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Downloading builder script: 0%| | 0.00/7.23k [00:00
Copy a token from your Hugging Face\ntokens page and paste it below.
Immediately click login after copying\nyour token or it might be stored in plain text in this notebook file. " } }, "1215458d6e6e4c08a106d065440e04bb": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HBoxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_f3c940f54cd84e839ffcdc56e86f5ccd", "IPY_MODEL_81130b6c67794e409d4a4808d995d778", "IPY_MODEL_326ae8ed30094a30b4911e98c96858fe" ], "layout": "IPY_MODEL_d1d636fedf5447a18c7b9edaec33ffb0" } }, "145b730cd2594466bb56a42a11604cb5": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "159774f73b8047b1ae9f26f9028f31f8": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "17aaa3fcd9ce497aae02a461fa5ed057": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": 
null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "186994b97c4442f2ba97034c2f71930d": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "18970a099d8644b08c4e9b922bcd033c": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "FloatProgressModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_0a90f378430f4ecb8b70e01112300a28", "max": 1001, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_5b5f63f10cb04e9d96e843d21d2e8f54", "value": 1001 } }, "1afa7ca9c29d43589536ccdfaf0d369f": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "1ffb5c0070db4b5abd1f70b0b04e11e4": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "26d927b9ee1a40b5bdefce75cc6826c0": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", 
"_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "2c45744cb3394700ae1323eeb44d0c67": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "2ccaa71a83e04a059c8b951e9c6cc435": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_145b730cd2594466bb56a42a11604cb5", "placeholder": "​", "style": "IPY_MODEL_8becb928304c4be3b635df7188d41e7f", "value": " 1000/1001 [00:42<00:00, 23.61 examples/s]" } }, "2f0970bd274f42cfb8fbaf90c42a495e": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "PasswordModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "PasswordModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "PasswordView", "continuous_update": true, "description": "Token:", "description_tooltip": null, "disabled": false, "layout": "IPY_MODEL_b02d2a074963464f842bf5288fc392b1", "placeholder": "​", "style": "IPY_MODEL_086f38adee954a339186d967dbb551f0", "value": "" } }, "326ae8ed30094a30b4911e98c96858fe": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_9b8e16c408c846f092edf7b9050c8ad2", "placeholder": "​", "style": "IPY_MODEL_ad755941d549436a8f2d8cb371368771", "value": " 1/1 [00:01<00:00, 1.00s/it]" } }, "3328747c3bb64797ac036d795bce8fba": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": 
"HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_b2b6b2e728434e6382c455957f330b57", "placeholder": "​", "style": "IPY_MODEL_af5c4c2bfd6f4388bef785cfaa728947", "value": "Downloading builder script: 100%" } }, "371af51396de42fdaf214bbd1717cb71": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "3740ebe878e846cc93b632495b023ef8": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "VBoxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "VBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "VBoxView", "box_style": "", "children": [ "IPY_MODEL_115d86c2cee8402782366d6b5b09f871", "IPY_MODEL_2f0970bd274f42cfb8fbaf90c42a495e", "IPY_MODEL_ce3f6aa1245b4f3aa37670517b94658c", "IPY_MODEL_ff31ebf8d11e481485ad07c9f8bd8618", "IPY_MODEL_39167340720c460a8dfc88fdc426ee9d" ], "layout": "IPY_MODEL_8ad7a879bd624323a9bddec771c8cdaa" } }, "39167340720c460a8dfc88fdc426ee9d": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_ddc3696a2a674279a73a20b04e71d055", "placeholder": "​", "style": "IPY_MODEL_a8562dcddc2747c3b1225cacabbd679e", "value": "\nPro Tip: If you don't already have one, you can create a dedicated\n'notebooks' token with 'write' access, that you can then easily reuse for all\nnotebooks. 
" } }, "3a1c027ff6a841c3a88eb2e9c19af5c0": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "FloatProgressModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_704bc211850948b1a9ee9dd04d01d0b8", "max": 7226, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_a0a7cb292308478ab4dc55fb7ea69b80", "value": 7226 } }, "44d39ff0393c467ebd4ef273cca2e9be": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "47d0fc4344cf4690a8c91743f4a06ef1": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_f08ef47149e14dc1850be1dd2213dea6", "placeholder": "​", "style": "IPY_MODEL_d283cea383b94525ae10e62d50f82dba", "value": " 7.23k/7.23k [00:00<00:00, 276kB/s]" } }, "484158b33883468ebca6949fabe968fa": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": 
null, "top": null, "visibility": null, "width": null } }, "5392f2768260493baab51d047fe5c4ca": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_9a3622953c8548bebcd0de38935c2d05", "placeholder": "​", "style": "IPY_MODEL_6416ab65faed4c43a628d31e79bfbbf1", "value": "Map: 100%" } }, "581a792d3c5841cb8694527e359ea01b": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "58ae791121e34d7096c2b5c412cc3b09": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_610332412caf423fb56d4302abac5235", "placeholder": "​", "style": "IPY_MODEL_186994b97c4442f2ba97034c2f71930d", "value": " 7.54k/7.54k [00:00<00:00, 256kB/s]" } }, "5b5f63f10cb04e9d96e843d21d2e8f54": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "ProgressStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "5be5a44f790f425cb2937d29c5ff5539": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": 
null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "610332412caf423fb56d4302abac5235": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "6416ab65faed4c43a628d31e79bfbbf1": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "6680de217b6b4e43989e0c92ab10aa18": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_7b362ec7255f4ef89c2746db11e07417", "placeholder": "​", "style": "IPY_MODEL_dafa31af41ef4c4eae709ddd985458e0", "value": " 7.54k/7.54k [00:00<00:00, 275kB/s]" } }, "67a39dc45a1341519c202793c4a43cc0": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "FloatProgressModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_5be5a44f790f425cb2937d29c5ff5539", "max": 7537, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_eea4e0153bb94d5e9d6a2cb885b41192", "value": 7537 } }, "69ba53d684024c8bada63c016454ed25": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { 
"_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "69e3226d0fea4c1790bfef7435e19af4": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "6a46fde25f1c41b2bdfc63c4ab0c1f62": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HBoxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_f01c781b3d1f45abb2ad6f28fd522446", "IPY_MODEL_18970a099d8644b08c4e9b922bcd033c", "IPY_MODEL_2ccaa71a83e04a059c8b951e9c6cc435" ], "layout": "IPY_MODEL_e3474e3bd368442eadf2a428bafb2c5b" } }, "6c3650a634274e518d9125d89afb75ac": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "ProgressStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "704bc211850948b1a9ee9dd04d01d0b8": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, 
"padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "7336e16c609f40e0a780b74553af7149": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "73abf7199d3e4853a12a1458d54d5cc9": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HBoxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_3328747c3bb64797ac036d795bce8fba", "IPY_MODEL_3a1c027ff6a841c3a88eb2e9c19af5c0", "IPY_MODEL_fc48cabbba9c4563b1cb90d04147f007" ], "layout": "IPY_MODEL_159774f73b8047b1ae9f26f9028f31f8" } }, "787706b07f3d46aa98acae65d768a302": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "ProgressStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "7b362ec7255f4ef89c2746db11e07417": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "81130b6c67794e409d4a4808d995d778": { "model_module": "@jupyter-widgets/controls", 
"model_module_version": "1.5.0", "model_name": "FloatProgressModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_aa4b1d988b2148b8b7a40bacb4c64e6c", "max": 1, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_6c3650a634274e518d9125d89afb75ac", "value": 1 } }, "871984107d39457d996c84211ae26fec": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "FloatProgressModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_bd38d2f6d18347e2b95b641435b1d71e", "max": 7537, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_0efda4d9914840e9b5a99a0e0e62986f", "value": 7537 } }, "8ad7a879bd624323a9bddec771c8cdaa": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": "center", "align_self": null, "border": null, "bottom": null, "display": "flex", "flex": null, "flex_flow": "column", "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": "50%" } }, "8aed8c83c26a4fc08c0226e0afe142d1": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_69e3226d0fea4c1790bfef7435e19af4", "placeholder": "​", "style": "IPY_MODEL_f36f45f89dd24fffb02fa6f441af365d", "value": "Downloading builder script: 100%" } }, "8b9695a72a344ea6a794bfe260db32f8": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "8becb928304c4be3b635df7188d41e7f": { "model_module": 
"@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "95988137ffaa479396d24efb98e0bd54": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": "hidden", "width": null } }, "98ee0dd1ec81423ea4bdd1937552eee8": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_581a792d3c5841cb8694527e359ea01b", "placeholder": "​", "style": "IPY_MODEL_69ba53d684024c8bada63c016454ed25", "value": " 1000/1001 [00:34<00:00, 28.66 examples/s]" } }, "9902a7618c75439997d3e3b7656c77c0": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "9a3622953c8548bebcd0de38935c2d05": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": 
"LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "9b8e16c408c846f092edf7b9050c8ad2": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "9e5a7a7bc9944201b4d44b56d4ecb358": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "ButtonStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ButtonStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "button_color": null, "font_weight": "" } }, "a0a7cb292308478ab4dc55fb7ea69b80": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "ProgressStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "a826ec21ed994b1eae0dee93034da934": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "a8562dcddc2747c3b1225cacabbd679e": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": 
"@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "aa4b1d988b2148b8b7a40bacb4c64e6c": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "aa9e5dc34987457ba8132dc5faf197c3": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "FloatProgressModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_f15ef3331a6c441d828e0a63609ab1ce", "max": 7226, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_787706b07f3d46aa98acae65d768a302", "value": 7226 } }, "abd3ee4efbf74d2eae23ea502bb2b48f": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "FloatProgressModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_9902a7618c75439997d3e3b7656c77c0", "max": 1001, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_c4fc0f2132f5466ab1e481f78d96d12a", "value": 1001 } }, "ad755941d549436a8f2d8cb371368771": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "af5c4c2bfd6f4388bef785cfaa728947": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", 
"_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "b02d2a074963464f842bf5288fc392b1": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "b2b6b2e728434e6382c455957f330b57": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "b5382a062a91425b8a983ec5c69ba5d9": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "b904e67880f24621a454ccdb10df3e4a": { 
"model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HBoxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_bcd56cc08c46423bb2925851797b9e8c", "IPY_MODEL_67a39dc45a1341519c202793c4a43cc0", "IPY_MODEL_58ae791121e34d7096c2b5c412cc3b09" ], "layout": "IPY_MODEL_484158b33883468ebca6949fabe968fa" } }, "bcd56cc08c46423bb2925851797b9e8c": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_c4b4fcd531d74b9cabaaaf08019061b2", "placeholder": "​", "style": "IPY_MODEL_a826ec21ed994b1eae0dee93034da934", "value": "Downloading readme: 100%" } }, "bd38d2f6d18347e2b95b641435b1d71e": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "be635bc6e77e415da160e5173e244956": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "c010525bfff34886a1ff8ea5825445b7": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, 
"grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "c44b9367f99b41f1bab44fe0ffef053c": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_17aaa3fcd9ce497aae02a461fa5ed057", "placeholder": "​", "style": "IPY_MODEL_f00a1407959c4343b814cab0216194b7", "value": "Downloading readme: 100%" } }, "c4b4fcd531d74b9cabaaaf08019061b2": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "c4fc0f2132f5466ab1e481f78d96d12a": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "ProgressStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "ce3f6aa1245b4f3aa37670517b94658c": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "CheckboxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "CheckboxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "CheckboxView", "description": "Add token as git credential?", "description_tooltip": null, "disabled": false, "indent": true, "layout": "IPY_MODEL_44d39ff0393c467ebd4ef273cca2e9be", "style": "IPY_MODEL_8b9695a72a344ea6a794bfe260db32f8", "value": true } }, "d1d636fedf5447a18c7b9edaec33ffb0": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": 
null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "d283cea383b94525ae10e62d50f82dba": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "d63668c76e494164bfe3a97f0b3fbc35": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HBoxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_8aed8c83c26a4fc08c0226e0afe142d1", "IPY_MODEL_aa9e5dc34987457ba8132dc5faf197c3", "IPY_MODEL_47d0fc4344cf4690a8c91743f4a06ef1" ], "layout": "IPY_MODEL_7336e16c609f40e0a780b74553af7149" } }, "da71c7f787ad4adca381a7dd7e4eeead": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "dafa31af41ef4c4eae709ddd985458e0": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "ddc3696a2a674279a73a20b04e71d055": 
{ "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "e3474e3bd368442eadf2a428bafb2c5b": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": "hidden", "width": null } }, "edfd968788b94f798b81abca4eb7f673": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HBoxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_c44b9367f99b41f1bab44fe0ffef053c", "IPY_MODEL_871984107d39457d996c84211ae26fec", "IPY_MODEL_6680de217b6b4e43989e0c92ab10aa18" ], "layout": "IPY_MODEL_da71c7f787ad4adca381a7dd7e4eeead" } }, "eea4e0153bb94d5e9d6a2cb885b41192": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "ProgressStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "ef5f9ab0eccb42e882a71cf35202912f": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": 
"1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "f00a1407959c4343b814cab0216194b7": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "f01c781b3d1f45abb2ad6f28fd522446": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_ef5f9ab0eccb42e882a71cf35202912f", "placeholder": "​", "style": "IPY_MODEL_371af51396de42fdaf214bbd1717cb71", "value": "Map: 100%" } }, "f08ef47149e14dc1850be1dd2213dea6": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "f15ef3331a6c441d828e0a63609ab1ce": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, 
"border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "f36f45f89dd24fffb02fa6f441af365d": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "f3c940f54cd84e839ffcdc56e86f5ccd": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_1ffb5c0070db4b5abd1f70b0b04e11e4", "placeholder": "​", "style": "IPY_MODEL_1afa7ca9c29d43589536ccdfaf0d369f", "value": "100%" } }, "fc48cabbba9c4563b1cb90d04147f007": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_b5382a062a91425b8a983ec5c69ba5d9", "placeholder": "​", "style": "IPY_MODEL_be635bc6e77e415da160e5173e244956", "value": " 7.23k/7.23k [00:00<00:00, 423kB/s]" } }, "ff31ebf8d11e481485ad07c9f8bd8618": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "ButtonModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ButtonModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ButtonView", "button_style": "", "description": "Login", "disabled": false, "icon": "", "layout": "IPY_MODEL_2c45744cb3394700ae1323eeb44d0c67", "style": "IPY_MODEL_9e5a7a7bc9944201b4d44b56d4ecb358", "tooltip": "" } } } } }, "nbformat": 4, "nbformat_minor": 1 }