will33am committed
Commit 7d758e3
1 Parent(s): ffb85f9
.ipynb_checkpoints/AVA-checkpoint.py CHANGED
@@ -3,6 +3,7 @@ import os
 import datasets
 import joblib
 from pathlib import Path
+from tqdm import tqdm
 
 
 _BASE_HF_URL = Path("./data")
@@ -37,7 +38,9 @@ class AVA(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
         archives = dl_manager.download(_DATA_URL)
+        print("Init loading Metadata")
         self.DICT_METADATA = Path(self.dl_manager.download_and_extract(_BASE_HF_URL)) / "metadata.pkl"
+        print("Finish loading Metadata")
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
@@ -53,7 +56,7 @@ class AVA(datasets.GeneratorBasedBuilder):
 
         idx = 0
         for archive in archives:
-            for path, file in archive:
+            for path, file in tqdm(archive):
                 if path.endswith(".jpg"):
                     # image filepath format: <IMAGE_FILE NAME>_<SYNSET_ID>.JPEG
                     _id = int(os.path.splitext(b[0])[0].split('/')[-1])
AVA.py CHANGED
@@ -3,6 +3,7 @@ import os
 import datasets
 import joblib
 from pathlib import Path
+from tqdm import tqdm
 
 
 _BASE_HF_URL = Path("./data")
@@ -37,7 +38,9 @@ class AVA(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
         archives = dl_manager.download(_DATA_URL)
+        print("Init loading Metadata")
         self.DICT_METADATA = Path(self.dl_manager.download_and_extract(_BASE_HF_URL)) / "metadata.pkl"
+        print("Finish loading Metadata")
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
@@ -53,7 +56,7 @@ class AVA(datasets.GeneratorBasedBuilder):
 
         idx = 0
         for archive in archives:
-            for path, file in archive:
+            for path, file in tqdm(archive):
                 if path.endswith(".jpg"):
                     # image filepath format: <IMAGE_FILE NAME>_<SYNSET_ID>.JPEG
                     _id = int(os.path.splitext(b[0])[0].split('/')[-1])
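
For context, a minimal sketch of how the patched loop plausibly sits inside the builder's _generate_examples method. Only the lines visible in the diff are confirmed; the method signature, the iter_archive wiring, and the yielded fields are assumptions. Note also that the context line `_id = int(os.path.splitext(b[0])[0].split('/')[-1])` references a name `b` that is undefined in the visible code; the sketch assumes `path` was intended.

import os
from tqdm import tqdm

# Hypothetical reconstruction -- only the diffed lines are confirmed.
# `archives` is assumed to be a list of (path, file) iterators, e.g. as
# produced by dl_manager.iter_archive(...) in the `datasets` library.
def _generate_examples(self, archives):
    idx = 0
    for archive in archives:
        # tqdm(archive) is this commit's change: it wraps the archive
        # member iterator so extraction shows a progress bar.
        for path, file in tqdm(archive):
            if path.endswith(".jpg"):
                # e.g. "images/953619.jpg" -> 953619
                # (assumes `path` rather than the undefined `b[0]`)
                _id = int(os.path.splitext(path)[0].split("/")[-1])
                yield idx, {"id": _id, "image": {"path": path, "bytes": file.read()}}
                idx += 1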
notebooks/Test.ipynb CHANGED
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 10,
    "id": "aef315bf",
    "metadata": {},
    "outputs": [],
@@ -12,63 +12,44 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 9,
+   "execution_count": null,
    "id": "c0ed6498",
    "metadata": {},
    "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Downloading and preparing dataset ava/default to /home/william/.cache/huggingface/datasets/will33am___ava/default/1.0.0/dc18bb43c11395496a83e96a91fdb26162bab200a16d35297b4d6e6ceccb4864...\n"
-     ]
-    },
     {
      "data": {
       "application/vnd.jupyter.widget-view+json": {
-       "model_id": "d06e8600ce884a008bc0356e22aaaf97",
+       "model_id": "50a1f0994d194159b8548ee244ce5212",
        "version_major": 2,
        "version_minor": 0
       },
       "text/plain": [
-       "Downloading data files: 0%| | 0/1 [00:00<?, ?it/s]"
+       "Downloading builder script: 0%| | 0.00/2.14k [00:00<?, ?B/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Downloading and preparing dataset ava/default to /home/william/.cache/huggingface/datasets/will33am___ava/default/1.0.0/e506f33c7289f91cd0c33a5e116547fb1344bc64a605d0ea5bef759973b0e1a1...\n"
+     ]
+    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
-      "model_id": "f139bae410ec45eb81a034742f6ea059",
+      "model_id": "58da99a92ec04b9e809dab1dba2a1663",
       "version_major": 2,
       "version_minor": 0
      },
     "text/plain": [
-     "Computing checksums: 0%| | 0/1 [00:01<?, ?it/s]"
+     "Downloading data files: 0%| | 0/1 [00:00<?, ?it/s]"
     ]
    },
    "metadata": {},
    "output_type": "display_data"
-   },
-   {
-    "ename": "KeyboardInterrupt",
-    "evalue": "",
-    "output_type": "error",
-    "traceback": [
-     "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
-     "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
-     "File \u001b[0;32m<timed exec>:1\u001b[0m\n",
-     "File \u001b[0;32m/opt/conda/envs/hugginface/lib/python3.8/site-packages/datasets/load.py:1757\u001b[0m, in \u001b[0;36mload_dataset\u001b[0;34m(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, revision, use_auth_token, task, streaming, num_proc, **config_kwargs)\u001b[0m\n\u001b[1;32m 1754\u001b[0m try_from_hf_gcs \u001b[38;5;241m=\u001b[39m path \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m _PACKAGED_DATASETS_MODULES\n\u001b[1;32m 1756\u001b[0m \u001b[38;5;66;03m# Download and prepare data\u001b[39;00m\n\u001b[0;32m-> 1757\u001b[0m \u001b[43mbuilder_instance\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdownload_and_prepare\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1758\u001b[0m \u001b[43m \u001b[49m\u001b[43mdownload_config\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_config\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1759\u001b[0m \u001b[43m \u001b[49m\u001b[43mdownload_mode\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_mode\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1760\u001b[0m \u001b[43m \u001b[49m\u001b[43mignore_verifications\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mignore_verifications\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1761\u001b[0m \u001b[43m \u001b[49m\u001b[43mtry_from_hf_gcs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtry_from_hf_gcs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1762\u001b[0m \u001b[43m \u001b[49m\u001b[43mnum_proc\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mnum_proc\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1763\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1765\u001b[0m \u001b[38;5;66;03m# Build dataset for splits\u001b[39;00m\n\u001b[1;32m 1766\u001b[0m keep_in_memory \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m 1767\u001b[0m keep_in_memory \u001b[38;5;28;01mif\u001b[39;00m keep_in_memory \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01melse\u001b[39;00m is_small_dataset(builder_instance\u001b[38;5;241m.\u001b[39minfo\u001b[38;5;241m.\u001b[39mdataset_size)\n\u001b[1;32m 1768\u001b[0m )\n",
-     "File \u001b[0;32m/opt/conda/envs/hugginface/lib/python3.8/site-packages/datasets/builder.py:860\u001b[0m, in \u001b[0;36mDatasetBuilder.download_and_prepare\u001b[0;34m(self, output_dir, download_config, download_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, file_format, max_shard_size, num_proc, storage_options, **download_and_prepare_kwargs)\u001b[0m\n\u001b[1;32m 858\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m num_proc \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 859\u001b[0m prepare_split_kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mnum_proc\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m num_proc\n\u001b[0;32m--> 860\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_download_and_prepare\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 861\u001b[0m \u001b[43m \u001b[49m\u001b[43mdl_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdl_manager\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 862\u001b[0m \u001b[43m \u001b[49m\u001b[43mverify_infos\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mverify_infos\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 863\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mprepare_split_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 864\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mdownload_and_prepare_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 865\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 866\u001b[0m \u001b[38;5;66;03m# Sync info\u001b[39;00m\n\u001b[1;32m 867\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minfo\u001b[38;5;241m.\u001b[39mdataset_size \u001b[38;5;241m=\u001b[39m \u001b[38;5;28msum\u001b[39m(split\u001b[38;5;241m.\u001b[39mnum_bytes \u001b[38;5;28;01mfor\u001b[39;00m split \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minfo\u001b[38;5;241m.\u001b[39msplits\u001b[38;5;241m.\u001b[39mvalues())\n",
-     "File \u001b[0;32m/opt/conda/envs/hugginface/lib/python3.8/site-packages/datasets/builder.py:1611\u001b[0m, in \u001b[0;36mGeneratorBasedBuilder._download_and_prepare\u001b[0;34m(self, dl_manager, verify_infos, **prepare_splits_kwargs)\u001b[0m\n\u001b[1;32m 1610\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_download_and_prepare\u001b[39m(\u001b[38;5;28mself\u001b[39m, dl_manager, verify_infos, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mprepare_splits_kwargs):\n\u001b[0;32m-> 1611\u001b[0m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_download_and_prepare\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1612\u001b[0m \u001b[43m \u001b[49m\u001b[43mdl_manager\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mverify_infos\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcheck_duplicate_keys\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mverify_infos\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mprepare_splits_kwargs\u001b[49m\n\u001b[1;32m 1613\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n",
-     "File \u001b[0;32m/opt/conda/envs/hugginface/lib/python3.8/site-packages/datasets/builder.py:931\u001b[0m, in \u001b[0;36mDatasetBuilder._download_and_prepare\u001b[0;34m(self, dl_manager, verify_infos, **prepare_split_kwargs)\u001b[0m\n\u001b[1;32m 929\u001b[0m split_dict \u001b[38;5;241m=\u001b[39m SplitDict(dataset_name\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mname)\n\u001b[1;32m 930\u001b[0m split_generators_kwargs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_make_split_generators_kwargs(prepare_split_kwargs)\n\u001b[0;32m--> 931\u001b[0m split_generators \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_split_generators\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdl_manager\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43msplit_generators_kwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 933\u001b[0m \u001b[38;5;66;03m# Checksums verification\u001b[39;00m\n\u001b[1;32m 934\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m verify_infos \u001b[38;5;129;01mand\u001b[39;00m dl_manager\u001b[38;5;241m.\u001b[39mrecord_checksums:\n",
-     "File \u001b[0;32m~/.cache/huggingface/modules/datasets_modules/datasets/will33am--AVA/dc18bb43c11395496a83e96a91fdb26162bab200a16d35297b4d6e6ceccb4864/AVA.py:39\u001b[0m, in \u001b[0;36mAVA._split_generators\u001b[0;34m(self, dl_manager)\u001b[0m\n\u001b[1;32m 37\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_split_generators\u001b[39m(\u001b[38;5;28mself\u001b[39m, dl_manager):\n\u001b[1;32m 38\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Returns SplitGenerators.\"\"\"\u001b[39;00m\n\u001b[0;32m---> 39\u001b[0m archives \u001b[38;5;241m=\u001b[39m \u001b[43mdl_manager\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdownload\u001b[49m\u001b[43m(\u001b[49m\u001b[43m_DATA_URL\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 41\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m [\n\u001b[1;32m 42\u001b[0m datasets\u001b[38;5;241m.\u001b[39mSplitGenerator(\n\u001b[1;32m 43\u001b[0m name\u001b[38;5;241m=\u001b[39mdatasets\u001b[38;5;241m.\u001b[39mSplit\u001b[38;5;241m.\u001b[39mTRAIN,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 48\u001b[0m )\n\u001b[1;32m 49\u001b[0m ]\n",
-     "File \u001b[0;32m/opt/conda/envs/hugginface/lib/python3.8/site-packages/datasets/download/download_manager.py:346\u001b[0m, in \u001b[0;36mDownloadManager.download\u001b[0;34m(self, url_or_urls)\u001b[0m\n\u001b[1;32m 343\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdownloaded_paths\u001b[38;5;241m.\u001b[39mupdate(\u001b[38;5;28mdict\u001b[39m(\u001b[38;5;28mzip\u001b[39m(url_or_urls\u001b[38;5;241m.\u001b[39mflatten(), downloaded_path_or_paths\u001b[38;5;241m.\u001b[39mflatten())))\n\u001b[1;32m 345\u001b[0m start_time \u001b[38;5;241m=\u001b[39m datetime\u001b[38;5;241m.\u001b[39mnow()\n\u001b[0;32m--> 346\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_record_sizes_checksums\u001b[49m\u001b[43m(\u001b[49m\u001b[43murl_or_urls\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdownloaded_path_or_paths\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 347\u001b[0m duration \u001b[38;5;241m=\u001b[39m datetime\u001b[38;5;241m.\u001b[39mnow() \u001b[38;5;241m-\u001b[39m start_time\n\u001b[1;32m 348\u001b[0m logger\u001b[38;5;241m.\u001b[39minfo(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mChecksum Computation took \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mduration\u001b[38;5;241m.\u001b[39mtotal_seconds()\u001b[38;5;250m \u001b[39m\u001b[38;5;241m/\u001b[39m\u001b[38;5;241m/\u001b[39m\u001b[38;5;250m \u001b[39m\u001b[38;5;241m60\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m min\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
-     "File \u001b[0;32m/opt/conda/envs/hugginface/lib/python3.8/site-packages/datasets/download/download_manager.py:246\u001b[0m, in \u001b[0;36mDownloadManager._record_sizes_checksums\u001b[0;34m(self, url_or_urls, downloaded_path_or_paths)\u001b[0m\n\u001b[1;32m 238\u001b[0m delay \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m5\u001b[39m\n\u001b[1;32m 239\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m url, path \u001b[38;5;129;01min\u001b[39;00m tqdm(\n\u001b[1;32m 240\u001b[0m \u001b[38;5;28mlist\u001b[39m(\u001b[38;5;28mzip\u001b[39m(url_or_urls\u001b[38;5;241m.\u001b[39mflatten(), downloaded_path_or_paths\u001b[38;5;241m.\u001b[39mflatten())),\n\u001b[1;32m 241\u001b[0m delay\u001b[38;5;241m=\u001b[39mdelay,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 244\u001b[0m ):\n\u001b[1;32m 245\u001b[0m \u001b[38;5;66;03m# call str to support PathLike objects\u001b[39;00m\n\u001b[0;32m--> 246\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_recorded_sizes_checksums[\u001b[38;5;28mstr\u001b[39m(url)] \u001b[38;5;241m=\u001b[39m \u001b[43mget_size_checksum_dict\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 247\u001b[0m \u001b[43m \u001b[49m\u001b[43mpath\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrecord_checksum\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrecord_checksums\u001b[49m\n\u001b[1;32m 248\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 249\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m warn_about_checksums \u001b[38;5;129;01mand\u001b[39;00m _time \u001b[38;5;241m+\u001b[39m delay \u001b[38;5;241m<\u001b[39m time\u001b[38;5;241m.\u001b[39mtime():\n\u001b[1;32m 250\u001b[0m warn_about_checksums \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mFalse\u001b[39;00m\n",
-     "File \u001b[0;32m/opt/conda/envs/hugginface/lib/python3.8/site-packages/datasets/utils/info_utils.py:84\u001b[0m, in \u001b[0;36mget_size_checksum_dict\u001b[0;34m(path, record_checksum)\u001b[0m\n\u001b[1;32m 82\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28mopen\u001b[39m(path, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrb\u001b[39m\u001b[38;5;124m\"\u001b[39m) \u001b[38;5;28;01mas\u001b[39;00m f:\n\u001b[1;32m 83\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m chunk \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28miter\u001b[39m(\u001b[38;5;28;01mlambda\u001b[39;00m: f\u001b[38;5;241m.\u001b[39mread(\u001b[38;5;241m1\u001b[39m \u001b[38;5;241m<<\u001b[39m \u001b[38;5;241m20\u001b[39m), \u001b[38;5;124mb\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m\"\u001b[39m):\n\u001b[0;32m---> 84\u001b[0m \u001b[43mm\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mupdate\u001b[49m\u001b[43m(\u001b[49m\u001b[43mchunk\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 85\u001b[0m checksum \u001b[38;5;241m=\u001b[39m m\u001b[38;5;241m.\u001b[39mhexdigest()\n\u001b[1;32m 86\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n",
-     "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
-    ]
    }
   ],
   "source": [
@@ -79,7 +60,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "2f025a25",
+   "id": "34fa96b9",
    "metadata": {},
    "outputs": [],
    "source": []