will33am committed on
Commit ffb85f9
1 Parent(s): 7c26d90
.ipynb_checkpoints/AVA-checkpoint.py CHANGED
@@ -37,7 +37,7 @@ class AVA(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
         archives = dl_manager.download(_DATA_URL)
-
+        self.DICT_METADATA = Path(self.dl_manager.download_and_extract(_BASE_HF_URL)) / "metadata.pkl"
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
@@ -50,17 +50,19 @@ class AVA(datasets.GeneratorBasedBuilder):
 
     def _generate_examples(self, archives, split):
         """Yields examples."""
-        DICT_METADATA = Path(dl_manager.download_and_extract(_BASE_HF_URL)) / "metadata.pkl"
+
         idx = 0
         for archive in archives:
             for path, file in archive:
                 if path.endswith(".jpg"):
                     # image filepath format: <IMAGE_FILE NAME>_<SYNSET_ID>.JPEG
                     _id = int(os.path.splitext(b[0])[0].split('/')[-1])
-                    _metadata = DICT_METADATA[_id]
+                    _metadata = self.DICT_METADATA[_id]
                     ex = {"image": {"path": path, "bytes": file.read()},
                           "rating_counts": _metadata[0],
                           "text_tag0":_metadata[1],
                           "text_tag1": _metadata[2]}
                     yield idx, ex
                     idx += 1
+
+
AVA.py CHANGED
@@ -37,7 +37,7 @@ class AVA(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
         archives = dl_manager.download(_DATA_URL)
-
+        self.DICT_METADATA = Path(self.dl_manager.download_and_extract(_BASE_HF_URL)) / "metadata.pkl"
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
@@ -50,17 +50,19 @@ class AVA(datasets.GeneratorBasedBuilder):
 
     def _generate_examples(self, archives, split):
         """Yields examples."""
-        DICT_METADATA = Path(dl_manager.download_and_extract(_BASE_HF_URL)) / "metadata.pkl"
+
        idx = 0
         for archive in archives:
             for path, file in archive:
                 if path.endswith(".jpg"):
                     # image filepath format: <IMAGE_FILE NAME>_<SYNSET_ID>.JPEG
                     _id = int(os.path.splitext(b[0])[0].split('/')[-1])
-                    _metadata = DICT_METADATA[_id]
+                    _metadata = self.DICT_METADATA[_id]
                     ex = {"image": {"path": path, "bytes": file.read()},
                           "rating_counts": _metadata[0],
                           "text_tag0":_metadata[1],
                           "text_tag1": _metadata[2]}
                     yield idx, ex
                     idx += 1
+
+
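Note on the change above (committed identically to the checkpoint file and to AVA.py): hoisting the metadata load into _split_generators fixes the original scope bug (dl_manager was never available inside _generate_examples), but as committed the script still has three likely errors: it reads self.dl_manager where only the dl_manager argument exists, it stores self.DICT_METADATA as a Path to metadata.pkl yet later indexes it like a dict without ever unpickling it, and _generate_examples indexes an undefined name b where the archive member path appears to be intended. Below is a minimal corrected sketch, not the committed code; the gen_kwargs wiring is an assumption (lines 44-48 are elided from this diff), as is the layout of the pickled metadata dict.

    import os
    import pickle
    from pathlib import Path

    import datasets

    # _DATA_URL and _BASE_HF_URL are assumed to be module-level constants,
    # as in the rest of this loading script (not shown in the diff).

    class AVA(datasets.GeneratorBasedBuilder):

        def _split_generators(self, dl_manager):
            """Returns SplitGenerators."""
            archives = dl_manager.download(_DATA_URL)
            # Use the `dl_manager` argument; the builder has no `self.dl_manager`.
            metadata_path = Path(dl_manager.download_and_extract(_BASE_HF_URL)) / "metadata.pkl"
            # Unpickle once; a Path object cannot be indexed by image id.
            with open(metadata_path, "rb") as f:
                self.DICT_METADATA = pickle.load(f)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    # Hypothetical gen_kwargs: the diff elides lines 44-48, but the
                    # _generate_examples(self, archives, split) signature implies
                    # these two keys, with archives iterated via iter_archive.
                    gen_kwargs={
                        "archives": [dl_manager.iter_archive(a) for a in archives],
                        "split": "train",
                    },
                ),
            ]

        def _generate_examples(self, archives, split):
            """Yields examples."""
            idx = 0
            for archive in archives:
                for path, file in archive:
                    if path.endswith(".jpg"):
                        # `path` is the archive member name; the committed code
                        # indexes an undefined `b` here.
                        _id = int(os.path.splitext(path)[0].split("/")[-1])
                        _metadata = self.DICT_METADATA[_id]
                        ex = {"image": {"path": path, "bytes": file.read()},
                              "rating_counts": _metadata[0],
                              "text_tag0": _metadata[1],
                              "text_tag1": _metadata[2]}
                        yield idx, ex
                        idx += 1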
notebooks/Test.ipynb CHANGED
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 7,
    "id": "aef315bf",
    "metadata": {},
    "outputs": [],
@@ -12,35 +12,21 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 9,
    "id": "c0ed6498",
    "metadata": {},
    "outputs": [
-    {
-     "data": {
-      "application/vnd.jupyter.widget-view+json": {
-       "model_id": "2dc0ecb6d071440fbb63d9eb37239d51",
-       "version_major": 2,
-       "version_minor": 0
-      },
-      "text/plain": [
-       "Downloading builder script: 0%| | 0.00/2.12k [00:00<?, ?B/s]"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    },
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Downloading and preparing dataset ava/default to /home/william/.cache/huggingface/datasets/will33am___ava/default/1.0.0/e6b9e5062c6da3936a91aa998767b2df2e4743203754168806d3df6c592a5951...\n"
+      "Downloading and preparing dataset ava/default to /home/william/.cache/huggingface/datasets/will33am___ava/default/1.0.0/dc18bb43c11395496a83e96a91fdb26162bab200a16d35297b4d6e6ceccb4864...\n"
      ]
     },
     {
      "data": {
       "application/vnd.jupyter.widget-view+json": {
-       "model_id": "17de80e07195435abc87fa6d687bf641",
+       "model_id": "d06e8600ce884a008bc0356e22aaaf97",
        "version_major": 2,
        "version_minor": 0
       },
@@ -54,36 +40,46 @@
     {
      "data": {
       "application/vnd.jupyter.widget-view+json": {
-       "model_id": "6bc514a4c642434fab8a1728b6dc4ce9",
+       "model_id": "f139bae410ec45eb81a034742f6ea059",
        "version_major": 2,
        "version_minor": 0
       },
       "text/plain": [
-       "Downloading data: 0%| | 0.00/33.2G [00:00<?, ?B/s]"
+       "Computing checksums: 0%| | 0/1 [00:01<?, ?it/s]"
       ]
      },
      "metadata": {},
      "output_type": "display_data"
     },
     {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "\n",
-      "KeyboardInterrupt\n",
-      "\n"
+     "ename": "KeyboardInterrupt",
+     "evalue": "",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
+      "File \u001b[0;32m<timed exec>:1\u001b[0m\n",
+      "File \u001b[0;32m/opt/conda/envs/hugginface/lib/python3.8/site-packages/datasets/load.py:1757\u001b[0m, in \u001b[0;36mload_dataset\u001b[0;34m(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, revision, use_auth_token, task, streaming, num_proc, **config_kwargs)\u001b[0m\n\u001b[1;32m 1754\u001b[0m try_from_hf_gcs \u001b[38;5;241m=\u001b[39m path \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m _PACKAGED_DATASETS_MODULES\n\u001b[1;32m 1756\u001b[0m \u001b[38;5;66;03m# Download and prepare data\u001b[39;00m\n\u001b[0;32m-> 1757\u001b[0m \u001b[43mbuilder_instance\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdownload_and_prepare\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1758\u001b[0m \u001b[43m \u001b[49m\u001b[43mdownload_config\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_config\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1759\u001b[0m \u001b[43m \u001b[49m\u001b[43mdownload_mode\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdownload_mode\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1760\u001b[0m \u001b[43m \u001b[49m\u001b[43mignore_verifications\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mignore_verifications\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1761\u001b[0m \u001b[43m \u001b[49m\u001b[43mtry_from_hf_gcs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtry_from_hf_gcs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1762\u001b[0m \u001b[43m \u001b[49m\u001b[43mnum_proc\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mnum_proc\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1763\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1765\u001b[0m \u001b[38;5;66;03m# Build dataset for splits\u001b[39;00m\n\u001b[1;32m 1766\u001b[0m keep_in_memory \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m 1767\u001b[0m keep_in_memory \u001b[38;5;28;01mif\u001b[39;00m keep_in_memory \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01melse\u001b[39;00m is_small_dataset(builder_instance\u001b[38;5;241m.\u001b[39minfo\u001b[38;5;241m.\u001b[39mdataset_size)\n\u001b[1;32m 1768\u001b[0m )\n",
+      "File \u001b[0;32m/opt/conda/envs/hugginface/lib/python3.8/site-packages/datasets/builder.py:860\u001b[0m, in \u001b[0;36mDatasetBuilder.download_and_prepare\u001b[0;34m(self, output_dir, download_config, download_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, file_format, max_shard_size, num_proc, storage_options, **download_and_prepare_kwargs)\u001b[0m\n\u001b[1;32m 858\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m num_proc \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 859\u001b[0m prepare_split_kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mnum_proc\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m num_proc\n\u001b[0;32m--> 860\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_download_and_prepare\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 861\u001b[0m \u001b[43m \u001b[49m\u001b[43mdl_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdl_manager\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 862\u001b[0m \u001b[43m \u001b[49m\u001b[43mverify_infos\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mverify_infos\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 863\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mprepare_split_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 864\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mdownload_and_prepare_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 865\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 866\u001b[0m \u001b[38;5;66;03m# Sync info\u001b[39;00m\n\u001b[1;32m 867\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minfo\u001b[38;5;241m.\u001b[39mdataset_size \u001b[38;5;241m=\u001b[39m \u001b[38;5;28msum\u001b[39m(split\u001b[38;5;241m.\u001b[39mnum_bytes \u001b[38;5;28;01mfor\u001b[39;00m split \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minfo\u001b[38;5;241m.\u001b[39msplits\u001b[38;5;241m.\u001b[39mvalues())\n",
+      "File \u001b[0;32m/opt/conda/envs/hugginface/lib/python3.8/site-packages/datasets/builder.py:1611\u001b[0m, in \u001b[0;36mGeneratorBasedBuilder._download_and_prepare\u001b[0;34m(self, dl_manager, verify_infos, **prepare_splits_kwargs)\u001b[0m\n\u001b[1;32m 1610\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_download_and_prepare\u001b[39m(\u001b[38;5;28mself\u001b[39m, dl_manager, verify_infos, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mprepare_splits_kwargs):\n\u001b[0;32m-> 1611\u001b[0m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_download_and_prepare\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1612\u001b[0m \u001b[43m \u001b[49m\u001b[43mdl_manager\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mverify_infos\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcheck_duplicate_keys\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mverify_infos\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mprepare_splits_kwargs\u001b[49m\n\u001b[1;32m 1613\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n",
+      "File \u001b[0;32m/opt/conda/envs/hugginface/lib/python3.8/site-packages/datasets/builder.py:931\u001b[0m, in \u001b[0;36mDatasetBuilder._download_and_prepare\u001b[0;34m(self, dl_manager, verify_infos, **prepare_split_kwargs)\u001b[0m\n\u001b[1;32m 929\u001b[0m split_dict \u001b[38;5;241m=\u001b[39m SplitDict(dataset_name\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mname)\n\u001b[1;32m 930\u001b[0m split_generators_kwargs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_make_split_generators_kwargs(prepare_split_kwargs)\n\u001b[0;32m--> 931\u001b[0m split_generators \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_split_generators\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdl_manager\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43msplit_generators_kwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 933\u001b[0m \u001b[38;5;66;03m# Checksums verification\u001b[39;00m\n\u001b[1;32m 934\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m verify_infos \u001b[38;5;129;01mand\u001b[39;00m dl_manager\u001b[38;5;241m.\u001b[39mrecord_checksums:\n",
+      "File \u001b[0;32m~/.cache/huggingface/modules/datasets_modules/datasets/will33am--AVA/dc18bb43c11395496a83e96a91fdb26162bab200a16d35297b4d6e6ceccb4864/AVA.py:39\u001b[0m, in \u001b[0;36mAVA._split_generators\u001b[0;34m(self, dl_manager)\u001b[0m\n\u001b[1;32m 37\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_split_generators\u001b[39m(\u001b[38;5;28mself\u001b[39m, dl_manager):\n\u001b[1;32m 38\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Returns SplitGenerators.\"\"\"\u001b[39;00m\n\u001b[0;32m---> 39\u001b[0m archives \u001b[38;5;241m=\u001b[39m \u001b[43mdl_manager\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdownload\u001b[49m\u001b[43m(\u001b[49m\u001b[43m_DATA_URL\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 41\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m [\n\u001b[1;32m 42\u001b[0m datasets\u001b[38;5;241m.\u001b[39mSplitGenerator(\n\u001b[1;32m 43\u001b[0m name\u001b[38;5;241m=\u001b[39mdatasets\u001b[38;5;241m.\u001b[39mSplit\u001b[38;5;241m.\u001b[39mTRAIN,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 48\u001b[0m )\n\u001b[1;32m 49\u001b[0m ]\n",
+      "File \u001b[0;32m/opt/conda/envs/hugginface/lib/python3.8/site-packages/datasets/download/download_manager.py:346\u001b[0m, in \u001b[0;36mDownloadManager.download\u001b[0;34m(self, url_or_urls)\u001b[0m\n\u001b[1;32m 343\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdownloaded_paths\u001b[38;5;241m.\u001b[39mupdate(\u001b[38;5;28mdict\u001b[39m(\u001b[38;5;28mzip\u001b[39m(url_or_urls\u001b[38;5;241m.\u001b[39mflatten(), downloaded_path_or_paths\u001b[38;5;241m.\u001b[39mflatten())))\n\u001b[1;32m 345\u001b[0m start_time \u001b[38;5;241m=\u001b[39m datetime\u001b[38;5;241m.\u001b[39mnow()\n\u001b[0;32m--> 346\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_record_sizes_checksums\u001b[49m\u001b[43m(\u001b[49m\u001b[43murl_or_urls\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdownloaded_path_or_paths\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 347\u001b[0m duration \u001b[38;5;241m=\u001b[39m datetime\u001b[38;5;241m.\u001b[39mnow() \u001b[38;5;241m-\u001b[39m start_time\n\u001b[1;32m 348\u001b[0m logger\u001b[38;5;241m.\u001b[39minfo(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mChecksum Computation took \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mduration\u001b[38;5;241m.\u001b[39mtotal_seconds()\u001b[38;5;250m \u001b[39m\u001b[38;5;241m/\u001b[39m\u001b[38;5;241m/\u001b[39m\u001b[38;5;250m \u001b[39m\u001b[38;5;241m60\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m min\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
+      "File \u001b[0;32m/opt/conda/envs/hugginface/lib/python3.8/site-packages/datasets/download/download_manager.py:246\u001b[0m, in \u001b[0;36mDownloadManager._record_sizes_checksums\u001b[0;34m(self, url_or_urls, downloaded_path_or_paths)\u001b[0m\n\u001b[1;32m 238\u001b[0m delay \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m5\u001b[39m\n\u001b[1;32m 239\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m url, path \u001b[38;5;129;01min\u001b[39;00m tqdm(\n\u001b[1;32m 240\u001b[0m \u001b[38;5;28mlist\u001b[39m(\u001b[38;5;28mzip\u001b[39m(url_or_urls\u001b[38;5;241m.\u001b[39mflatten(), downloaded_path_or_paths\u001b[38;5;241m.\u001b[39mflatten())),\n\u001b[1;32m 241\u001b[0m delay\u001b[38;5;241m=\u001b[39mdelay,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 244\u001b[0m ):\n\u001b[1;32m 245\u001b[0m \u001b[38;5;66;03m# call str to support PathLike objects\u001b[39;00m\n\u001b[0;32m--> 246\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_recorded_sizes_checksums[\u001b[38;5;28mstr\u001b[39m(url)] \u001b[38;5;241m=\u001b[39m \u001b[43mget_size_checksum_dict\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 247\u001b[0m \u001b[43m \u001b[49m\u001b[43mpath\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrecord_checksum\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrecord_checksums\u001b[49m\n\u001b[1;32m 248\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 249\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m warn_about_checksums \u001b[38;5;129;01mand\u001b[39;00m _time \u001b[38;5;241m+\u001b[39m delay \u001b[38;5;241m<\u001b[39m time\u001b[38;5;241m.\u001b[39mtime():\n\u001b[1;32m 250\u001b[0m warn_about_checksums \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mFalse\u001b[39;00m\n",
+      "File \u001b[0;32m/opt/conda/envs/hugginface/lib/python3.8/site-packages/datasets/utils/info_utils.py:84\u001b[0m, in \u001b[0;36mget_size_checksum_dict\u001b[0;34m(path, record_checksum)\u001b[0m\n\u001b[1;32m 82\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28mopen\u001b[39m(path, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrb\u001b[39m\u001b[38;5;124m\"\u001b[39m) \u001b[38;5;28;01mas\u001b[39;00m f:\n\u001b[1;32m 83\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m chunk \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28miter\u001b[39m(\u001b[38;5;28;01mlambda\u001b[39;00m: f\u001b[38;5;241m.\u001b[39mread(\u001b[38;5;241m1\u001b[39m \u001b[38;5;241m<<\u001b[39m \u001b[38;5;241m20\u001b[39m), \u001b[38;5;124mb\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m\"\u001b[39m):\n\u001b[0;32m---> 84\u001b[0m \u001b[43mm\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mupdate\u001b[49m\u001b[43m(\u001b[49m\u001b[43mchunk\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 85\u001b[0m checksum \u001b[38;5;241m=\u001b[39m m\u001b[38;5;241m.\u001b[39mhexdigest()\n\u001b[1;32m 86\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n",
+      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
      ]
     }
    ],
    "source": [
     "%%time\n",
-    "ds = load_dataset(\"will33am/AVA\",split = 'train')"
+    "ds = load_dataset(\"will33am/AVA\")"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "f1611451",
+   "id": "2f025a25",
    "metadata": {},
    "outputs": [],
    "source": []