Datasets:

Modalities:
Image
Languages:
English
ArXiv:
Libraries:
Datasets
License:
parquet-converter committed on
Commit
67602a2
1 Parent(s): 5e193c3

Update parquet files

Browse files
README.md DELETED
@@ -1,18 +0,0 @@
1
- ---
2
- license: apache-2.0
3
- dataset_info:
4
- features:
5
- - name: image
6
- dtype: image
7
- - name: depth_map
8
- dtype: image
9
- splits:
10
- - name: train
11
- num_bytes: 20212097551
12
- num_examples: 47584
13
- - name: validation
14
- num_bytes: 240785762
15
- num_examples: 654
16
- download_size: 35151124480
17
- dataset_size: 20452883313
18
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
data/train-000003.tar DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:ad08e48ffffda394095e7bb9785b5cc89fe50421486bb784dd086639c65a5099
3
- size 3002982400
 
 
 
 
data/train-000004.tar DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:942896ecb705f3f791d1d313121f8700a192e54dfbf091312cacbd6adbeb5563
3
- size 3003443200
 
 
 
 
data/train-000005.tar DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:c6363a837492be5325a1d8e2989fa2947f588949b0f5a9818bb65504907cad64
3
- size 3003064320
 
 
 
 
data/train-000006.tar DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:77500df4b84d8af2aa87ba503a767ce0a8a693bfd32486d9b688b66cdfb172b4
3
- size 3002992640
 
 
 
 
data/train-000007.tar DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:b1aa7e0787d6fe9b160de213815566a40288da40106be8d5099e301936e347d1
3
- size 3003289600
 
 
 
 
data/train-000008.tar DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:ef9fa6bfeee9a8b150cd28837261e879ed1075c4b11d06c31792af0079861610
3
- size 3003443200
 
 
 
 
data/train-000009.tar DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:8cd051a16f763a0c40f3a5c7e5d720d6eb6566fb9553bd58ddeb9ae7923cad39
3
- size 3003064320
 
 
 
 
data/train-000010.tar DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:602109f56d08318ec20e496fc161c5ba1b5a25a506b048a54f5778e19194e0a6
3
- size 3003217920
 
 
 
 
data/train-000011.tar DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:4ef8723b66e1c09010f8c5305b5c7b9c526d455cd943aeac0280556fc3c5ede3
3
- size 1098700800
 
 
 
 
data/val-000000.tar DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:a75d57458afe063e6d9158a6cb3f41eabd859699f46043b0b8def2e2995049bb
3
- size 1001553920
 
 
 
 
data/val-000001.tar → default/partial/train/0000.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:92dc10ad1b799fb810011fcf5e85b017f949baae919903d66612e32d37e40bf2
3
- size 14786560
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cc29d09d54499665177dcffaed513de113781ff9927556820cdef8c8fed5eeeb
3
+ size 216691500
data/train-000000.tar → default/partial/train/0001.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:9a6a59407fa909a8ae70ffa8495893ef818bf6f349d0d2e486dd10333ddf256a
3
- size 3003340800
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f3e35af64d8e5408ab5dbeba37bebfa55aa6af7fb0dd38b9ca5b1b42273bba22
3
+ size 257676321
data/train-000001.tar → default/partial/train/0002.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:717ea0c58afdd747dfaf73e592f87fcf0261c62c39a5c2b39dd10f67952ca1e3
3
- size 3003658240
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2985221d8542cdebe81d98b2799373b824d251113ec43db6c5b24b5a6f15f154
3
+ size 248879498
data/train-000002.tar → default/partial/train/0003.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:2530c35ecb55e64bb7fdfae9e1ddddfaa339555e37dc19d74d98048c896ec409
3
- size 3003586560
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cf97418e8a6a3013590de14683a36d23a4244f4b247b99acffe3d337fa4b3c60
3
+ size 252128139
default/partial/train/0004.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b9ef103a58bb936c79aa13f31f49906ada7719df5c842689e433e8e3ad449037
3
+ size 233894990
default/partial/train/0005.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:345633293385acdb30f43883123c7542b4c3599717f91bd29450ea5cceb179bb
3
+ size 244819088
default/partial/train/0006.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:afd0dcb5767f4eb1ab244ddbc17a7a01378a87200324ecc83891ba645959ba11
3
+ size 243801056
default/partial/train/0007.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d41547b2c4e3a01058b95ad453d178558d5e77dab1c761657b0ff0da85f480e6
3
+ size 245621970
default/partial/validation/0000.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:126cd942582da72fc0d8fd57ad1baa3fa79f6c307cfb8a4c3082521e01016f76
3
+ size 634487865
default/partial/validation/0001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:775ac6850eadeb4668628a401640ba78275cd9efebfa5bd76e4e3fe9dda298c3
3
+ size 406412648
nyu_depth_v2.py DELETED
@@ -1,113 +0,0 @@
1
- # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- """NYU-Depth V2."""
15
-
16
-
17
- import io
18
-
19
- import datasets
20
- import h5py
21
- import numpy as np
22
-
23
- _CITATION = """\
24
- @inproceedings{Silberman:ECCV12,
25
- author = {Nathan Silberman, Derek Hoiem, Pushmeet Kohli and Rob Fergus},
26
- title = {Indoor Segmentation and Support Inference from RGBD Images},
27
- booktitle = {ECCV},
28
- year = {2012}
29
- }
30
- @inproceedings{icra_2019_fastdepth,
31
- author = {Wofk, Diana and Ma, Fangchang and Yang, Tien-Ju and Karaman, Sertac and Sze, Vivienne},
32
- title = {FastDepth: Fast Monocular Depth Estimation on Embedded Systems},
33
- booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
34
- year = {2019}
35
- }
36
- """
37
-
38
- _DESCRIPTION = """\
39
- The NYU-Depth V2 data set is comprised of video sequences from a variety of indoor scenes as recorded by both the RGB and Depth cameras from the Microsoft Kinect.
40
- """
41
-
42
- _HOMEPAGE = "https://cs.nyu.edu/~silberman/datasets/nyu_depth_v2.html"
43
-
44
- _LICENSE = "Apace 2.0 License"
45
-
46
- _URLS = {
47
- "train": [f"data/train-{i:06d}.tar" for i in range(12)],
48
- "val": [f"data/val-{i:06d}.tar" for i in range(2)],
49
- }
50
-
51
- _IMG_EXTENSIONS = [".h5"]
52
-
53
-
54
- class NYUDepthV2(datasets.GeneratorBasedBuilder):
55
- """NYU-Depth V2 dataset."""
56
-
57
- VERSION = datasets.Version("1.0.0")
58
-
59
- def _info(self):
60
- features = datasets.Features(
61
- {"image": datasets.Image(), "depth_map": datasets.Image()}
62
- )
63
- return datasets.DatasetInfo(
64
- description=_DESCRIPTION,
65
- features=features,
66
- homepage=_HOMEPAGE,
67
- license=_LICENSE,
68
- citation=_CITATION,
69
- )
70
-
71
- def _is_image_file(self, filename):
72
- # Reference: https://github.com/dwofk/fast-depth/blob/master/dataloaders/dataloader.py#L21-L23
73
- return any(filename.endswith(extension) for extension in _IMG_EXTENSIONS)
74
-
75
- def _h5_loader(self, bytes_stream):
76
- # Reference: https://github.com/dwofk/fast-depth/blob/master/dataloaders/dataloader.py#L8-L13
77
- f = io.BytesIO(bytes_stream)
78
- h5f = h5py.File(f, "r")
79
- rgb = np.array(h5f["rgb"])
80
- rgb = np.transpose(rgb, (1, 2, 0))
81
- depth = np.array(h5f["depth"])
82
- return rgb, depth
83
-
84
- def _split_generators(self, dl_manager):
85
- archives = dl_manager.download(_URLS)
86
-
87
- return [
88
- datasets.SplitGenerator(
89
- name=datasets.Split.TRAIN,
90
- gen_kwargs={
91
- "archives": [
92
- dl_manager.iter_archive(archive) for archive in archives["train"]
93
- ]
94
- },
95
- ),
96
- datasets.SplitGenerator(
97
- name=datasets.Split.VALIDATION,
98
- gen_kwargs={
99
- "archives": [
100
- dl_manager.iter_archive(archive) for archive in archives["val"]
101
- ]
102
- },
103
- ),
104
- ]
105
-
106
- def _generate_examples(self, archives):
107
- idx = 0
108
- for archive in archives:
109
- for path, file in archive:
110
- if self._is_image_file(path):
111
- image, depth = self._h5_loader(file.read())
112
- yield idx, {"image": image, "depth_map": depth}
113
- idx += 1