Commit 068408a • Parent: c6368bf
Dean committed:

remove secondary requirements (i.e. not things that are explicitly installed by the user), fix normalization problem, and use tqdm for image processing progress bar
Files changed:
- .gitignore +1 -0
- dvc.lock +7 -2
- requirements.txt +6 -105
- src/code/make_dataset.py +6 -10
- src/code/training.py +2 -1
.gitignore CHANGED
@@ -6,3 +6,4 @@
 aws/
 google-cloud-sdk
 __pycache__/
+env/
dvc.lock CHANGED
@@ -3,14 +3,19 @@ process_data:
     src/data/processed
   deps:
   - path: src/code/make_dataset.py
-    md5:
+    md5: fd5076d53909a47ce3b6598c26af6c97
+    size: 3783
   - path: src/data/raw/nyu_depth_v2_labeled.mat
     md5: 520609c519fba3ba5ac58c8fefcc3530
+    size: 2972037809
   - path: src/data/raw/splits.mat
     md5: 08e3c3aea27130ac7c01ffd739a4535f
+    size: 2626
   outs:
   - path: src/data/processed/
-    md5:
+    md5: d98a9647a37ab431bfa35815eb4afda0.dir
+    size: 232903470
+    nfiles: 2898
 train:
   cmd: python3 src/code/training.py src/data/processed
   deps:
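The added `size` and `nfiles` fields are extra metadata DVC records next to each `md5` checksum so that changed dependencies and outputs can be detected cheaply; the `.dir` suffix marks a directory-level checksum. As a rough sketch (not DVC's internal code), each per-file `md5` entry corresponds to a plain MD5 over the file contents:

```python
# Sketch: reproduce a per-file md5 entry from dvc.lock by hashing the file
# contents in chunks. Illustration only, not DVC's implementation.
import hashlib

def file_md5(path, chunk_size=1 << 20):
    md5 = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            md5.update(chunk)
    return md5.hexdigest()

# Expected to print 08e3c3aea27130ac7c01ffd739a4535f for the lock entry above.
print(file_md5("src/data/raw/splits.mat"))
```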
requirements.txt CHANGED
@@ -1,107 +1,8 @@
-
-
-
-blis==0.4.1
-cachetools==4.1.1
-catalogue==1.0.0
-certifi==2020.6.20
-cffi==1.14.2
-chardet==3.0.4
-colorama==0.4.3
-commonmark==0.9.1
-configobj==5.0.6
-cycler==0.10.0
-cymem==2.0.3
-dataclasses==0.6
-decorator==4.4.2
-dictdiffer==0.8.1
-distro==1.5.0
-dpath==2.0.1
-dvc==1.9.1
-fastai==2.0.0
-fastcore==1.0.0
-fastprogress==1.0.0
-flatten-json==0.1.7
-flufl.lock==3.2
-funcy==1.14
-future==0.18.2
-gitdb==4.0.5
-GitPython==3.1.7
-google-api-core==1.22.1
-google-auth==1.20.1
-google-cloud-core==1.4.1
-google-cloud-storage==1.19.0
-google-crc32c==0.1.0
-google-resumable-media==0.7.1
-googleapis-common-protos==1.52.0
-grandalf==0.6
+dvc==1.10.1
+fastai==2.1.5
+torch==1.7.0
 h5py==2.10.0
-idna==2.10
-importlib-metadata==1.7.0
-ipykernel==5.3.4
-ipython==7.17.0
-ipython-genutils==0.2.0
-jedi==0.17.2
-joblib==0.16.0
-jsonpath-ng==1.5.1
-kiwisolver==1.2.0
-matplotlib==3.3.1
-murmurhash==1.0.2
-nanotime==0.5.2
-networkx==2.4
-numpy==1.19.1
-olefile==0.46
 opencv-python==4.4.0.42
-
-
-
-pathspec==0.8.0
-pexpect==4.8.0
-pickleshare==0.7.5
-Pillow==7.2.0
-pip==20.2.2
-plac==1.1.3
-ply==3.11
-preshed==3.0.2
-prompt-toolkit==3.0.6
-protobuf==3.13.0
-ptyprocess==0.6.0
-pyasn1==0.4.8
-pyasn1-modules==0.2.8
-pycparser==2.20
-pydot==1.4.1
-Pygments==2.6.1
-pygtrie==2.3.2
-pyparsing==2.4.7
-python-dateutil==2.8.1
-pytz==2020.1
-PyYAML==5.3.1
-requests==2.24.0
-rich==5.2.1
-rsa==4.6
-ruamel.yaml==0.16.10
-ruamel.yaml.clib==0.2.0
-scikit-learn==0.23.2
-scipy==1.5.2
-shortuuid==1.0.1
-shtab==1.3.1
-six==1.15.0
-smmap==3.0.4
-spacy==2.3.2
-srsly==1.0.2
-tabulate==0.8.7
-thinc==7.4.1
-threadpoolctl==2.1.0
-toml==0.10.1
-torch==1.6.0
-torchvision==0.7.0
-tqdm==4.48.2
-traitlets==4.3.3
-typing-extensions==3.7.4.3
-urllib3==1.25.10
-voluptuous==0.11.7
-wasabi==0.7.1
-wcwidth==0.2.5
-wheel==0.35.1
-zc.lockfile==2.0
-zipp==3.1.0
+tqdm==4.52.0
+numpy==1.19.4
+scikit-learn==0.23.2
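With the pin list cut down to the packages the project imports or invokes directly, pip resolves the transitive dependencies (the Google Cloud clients, spacy, ipython, and so on) on its own. A small sanity-check sketch, assuming the environment was created with `pip install -r requirements.txt` on Python 3.8+:

```python
# Sketch: confirm the eight remaining top-level pins resolved to the
# expected versions in the current environment.
from importlib.metadata import version

for pkg in ("dvc", "fastai", "torch", "h5py",
            "opencv-python", "tqdm", "numpy", "scikit-learn"):
    print(f"{pkg}=={version(pkg)}")
```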
src/code/make_dataset.py CHANGED
@@ -39,13 +39,12 @@ import os
 import scipy.io
 import sys
 import cv2
+from tqdm import tqdm
 
 
 def convert_image(i, scene, depth, image, folder):
-
-
-    normalized_depth = np.zeros(img_depth_uint16.shape)
-    normalized_depth = cv2.normalize(img_depth_uint16, normalized_depth, 0, 255, cv2.NORM_MINMAX)
+    # depth is given in meters (Kinect has a range of around .5m and 4.5m but can sense also at 8m)
+    normalized_depth = cv2.normalize(depth, None, 0, 255, cv2.NORM_MINMAX)
     cv2.imwrite("%s/%05d_depth.png" % (folder, i), normalized_depth)
 
     image = image[:, :, ::-1]
@@ -75,12 +74,9 @@ if __name__ == "__main__":
     print("reading", sys.argv[1])
 
     images = h5_file['images']
-    scenes = [u''.join(chr(c) for c in h5_file[obj_ref]) for obj_ref in h5_file['sceneTypes'][0]]
-
-    print("processing images")
-    for i, image in enumerate(images):
-        print("image", i + 1, "/", len(images))
+    scenes = [u''.join(chr(c[0]) for c in h5_file[obj_ref]) for obj_ref in h5_file['sceneTypes'][0]]
 
+    for i, image in tqdm(enumerate(images), desc="processing images", total=len(images)):
         idx = int(i) + 1
         if idx in train_images:
             train_test = "train"
@@ -93,4 +89,4 @@ if __name__ == "__main__":
             os.makedirs(folder)
         convert_image(i, scenes[i], depth[i, :, :].T, image.T, folder)
 
-        print("Finished")
+    print("Finished")
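The normalization fix is the core of this change: the old code pre-allocated a destination array and normalized an already-quantized `img_depth_uint16` buffer, while the new code passes the raw float depth map straight to `cv2.normalize` and lets OpenCV allocate the output. A minimal sketch of the new behavior, using a synthetic depth map (the random array here is an assumption for illustration only):

```python
# Sketch of the new normalization path, with a fake float depth map in meters
# (NYU Depth v2 Kinect depths fall roughly between 0.5m and 4.5m).
import cv2
import numpy as np

depth = np.random.uniform(0.5, 4.5, size=(480, 640)).astype(np.float32)

# Scale the min..max of the raw depth to 0..255; dst=None tells OpenCV to
# allocate the output instead of reusing a caller-provided buffer.
normalized = cv2.normalize(depth, None, 0, 255, cv2.NORM_MINMAX)
cv2.imwrite("00000_depth.png", normalized.astype(np.uint8))
```

Note also that the `tqdm(enumerate(images), ...)` call in the diff needs the explicit `total=len(images)`: `enumerate` returns an iterator with no length, so tqdm could not otherwise size the progress bar.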
src/code/training.py CHANGED
@@ -1,8 +1,9 @@
 import torch
 import sys
-from
+from fastai.vision.all import *
 from torchvision.utils import save_image
 
+
 class ImageImageDataLoaders(DataLoaders):
     "Basic wrapper around several `DataLoader`s with factory methods for Image to Image problems"
     @classmethod
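The repaired import line matters because `DataLoaders` is defined by fastai, not by this file; without the wildcard import the class statement would raise a `NameError`. A minimal sketch, assuming fastai 2.x is installed:

```python
# Sketch: the wildcard import is what brings DataLoaders (and the rest of the
# fastai vision API) into scope for the subclass below.
from fastai.vision.all import *

class ImageImageDataLoaders(DataLoaders):
    "Basic wrapper around several `DataLoader`s with factory methods for Image to Image problems"
```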