Dean committed on
Commit eeb74de
1 Parent(s): f24654e

Migrated to fastai2. Creating the DataLoaders now works, but I'm stuck: I can't change the batch_size or num_workers, since the interface seems to have changed.
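(Aside, not part of this commit: in fastai2, batch size and worker count are normally passed when the DataLoaders object is constructed rather than mutated on it afterwards. A minimal hedged sketch, assuming `SegmentationDataLoaders.from_label_func` forwards these keyword arguments down to the underlying loaders; `get_y_fn` and `data_path` are as in src/code/training.py below:

```python
from fastai2.vision.all import *

# Sketch only: bs/num_workers as construction-time arguments.
data = SegmentationDataLoaders.from_label_func(
    data_path/'train',
    fnames=get_files(data_path/'train', extensions='.jpg'),
    label_func=get_y_fn,
    bs=1,           # fastai2 calls it bs, not batch_size
    num_workers=0,  # forwarded to the wrapped DataLoader objects
)
```
)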

.gitignore CHANGED
@@ -5,3 +5,4 @@
 .workspace/
 aws/
 google-cloud-sdk
+__pycache__/
Makefile CHANGED
@@ -27,7 +27,11 @@ else
 	@echo ">>> New virtual env created. Activate with:\nsource env/bin/activate ."
 endif
 
-requirements:
+load_requirements:
 	@echo ">>> Installing requirements. Make sure your virtual environment is activated."
 	$(PYTHON_INTERPRETER) -m pip install -U pip setuptools wheel
-	$(PYTHON_INTERPRETER) -m pip install -r requirements.txt
+	$(PYTHON_INTERPRETER) -m pip install -r requirements.txt
+
+save_requirements:
+	@echo ">>> Saving requirements."
+	pip list --format=freeze > requirements.txt
README.md CHANGED
@@ -28,8 +28,8 @@ If you'd like to take part, please follow the guide.
 
 ```bash
 $ make env
-$ conda activate savta_depth
-$ make requirements
+$ source activate savta_depth
+$ make load_requirements
 ```
 
 **Note**: If you don't have a GPU you will need to install pytorch separately and then run make requirements. You can install pytorch for computers without a gpu with the following command:
@@ -49,7 +49,7 @@ If you'd like to take part, please follow the guide.
 * Freeze your virtualenv by typing in the terminal:
 
 ```bash
-pip freeze > requirements.txt
+$ make save_requirements
 ```
 
 * Push your code to DAGsHub, and your dvc managed files to your dvc remote. In order to setup a dvc remote please refer to [this guide](https://dagshub.com/docs/getting-started/set-up-remote-storage-for-data-and-models/).
requirements.txt CHANGED
@@ -1,5 +1,6 @@
 appdirs==1.4.4
 atpublic==2.0
+backcall==0.2.0
 blis==0.4.1
 cachetools==4.1.1
 catalogue==1.0.0
@@ -37,6 +38,9 @@ grandalf==0.6
 h5py==2.10.0
 idna==2.10
 importlib-metadata==1.7.0
+ipython==7.17.0
+ipython-genutils==0.2.0
+jedi==0.17.2
 joblib==0.16.0
 jsonpath-ng==1.5.1
 kiwisolver==1.2.0
@@ -52,12 +56,18 @@ olefile==0.46
 opencv-python==4.4.0.42
 packaging==20.4
 pandas==1.1.1
+parso==0.7.1
 pathspec==0.8.0
+pexpect==4.8.0
+pickleshare==0.7.5
 Pillow==7.2.0
+pip==20.2.2
 plac==1.1.3
 ply==3.11
 preshed==3.0.2
+prompt-toolkit==3.0.6
 protobuf==3.13.0
+ptyprocess==0.6.0
 pyasn1==0.4.8
 pyasn1-modules==0.2.8
 pycparser==2.20
@@ -75,6 +85,7 @@ ruamel.yaml==0.16.10
 ruamel.yaml.clib==0.2.0
 scikit-learn==0.23.2
 scipy==1.5.2
+setuptools==49.6.0.post20200814
 shortuuid==1.0.1
 shtab==1.3.1
 six==1.15.0
@@ -88,9 +99,12 @@ toml==0.10.1
 torch==1.6.0
 torchvision==0.7.0
 tqdm==4.48.2
+traitlets==4.3.3
 typing-extensions==3.7.4.3
 urllib3==1.25.10
 voluptuous==0.11.7
 wasabi==0.7.1
+wcwidth==0.2.5
+wheel==0.35.1
 zc.lockfile==2.0
 zipp==3.1.0
run_dev_env.sh CHANGED
@@ -1,7 +1,7 @@
 docker run -d \
 	-p 8080:8080 \
-	--name "ml-workspace" -v "${PWD}:/workspace" \
+	--name "dags-ml-workspace" -v "${PWD}:/workspace" \
 	--env AUTHENTICATE_VIA_JUPYTER="dagshub_savta" \
 	--shm-size 2G \
 	--restart always \
-	mltooling/ml-workspace-minimal:latest
+	dagshub/ml-workspace-minimal:latest
src/code/__pycache__/make_dataset.cpython-37.pyc DELETED
Binary file (143 Bytes)
 
src/code/training.py CHANGED
@@ -1,6 +1,7 @@
 import torch
 import sys
-from fastai.vision import unet_learner, ImageImageList, models, Path, root_mean_squared_error
+from fastai2.vision.all import *
+from torchvision.utils import save_image
 
 
 def get_y_fn(x):
@@ -10,16 +11,16 @@ def get_y_fn(x):
     return y
 
 
-def create_databunch(data_path):
-    data = (ImageImageList.from_folder(data_path)
-            .filter_by_func(lambda fname: fname.suffix == '.jpg')
-            .split_by_folder(train='train', valid='test')
-            .label_from_func(get_y_fn).databunch()).normalize()
+def create_data(data_path):
+    fnames = get_files(data_path/'train', extensions='.jpg')
+    data = SegmentationDataLoaders.from_label_func(data_path/'train', fnames=fnames, label_func=get_y_fn)
     return data
 
 
 def train(data):
-    learner = unet_learner(data, models.resnet34, metrics=root_mean_squared_error, wd=1e-2, loss_func=torch.nn.SmoothL1Loss())
+    data.num_workers = 0
+    data.bs = 1
+    learner = unet_learner(data, resnet34, metrics=rmse, wd=1e-2, n_out=1, loss_func=torch.nn.SmoothL1Loss())
     learner.fit_one_cycle(1, 1e-3)
 
 
@@ -28,7 +29,7 @@ if __name__ == "__main__":
     print("usage: %s <data_path> <out_folder>" % sys.argv[0], file=sys.stderr)
     sys.exit(0)
 
-    data = create_databunch(sys.argv[1])
+    data = create_data(Path(sys.argv[1]))
     data.batch_size = 1
     data.num_workers = 0
     learner = train(data)
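(Aside on the sticking point, not part of this commit: the `data.batch_size = 1` and `data.num_workers = 0` assignments in `__main__` likely have no effect in fastai2, because they only attach new attributes to the DataLoaders wrapper rather than reconfiguring the wrapped loaders. A hedged sketch of a possible rework that passes the settings through at construction time; the `bs`/`num_workers` parameters on `create_data` are an assumption for illustration, as is `from_label_func` forwarding them:

```python
from fastai2.vision.all import *
import sys

def create_data(data_path, bs=1, num_workers=0):
    # Hypothetical rework: forward bs/num_workers to from_label_func,
    # which is assumed to pass them on to the underlying DataLoaders.
    fnames = get_files(data_path/'train', extensions='.jpg')
    return SegmentationDataLoaders.from_label_func(
        data_path/'train', fnames=fnames, label_func=get_y_fn,
        bs=bs, num_workers=num_workers)

if __name__ == "__main__":
    data = create_data(Path(sys.argv[1]), bs=1, num_workers=0)
    learner = train(data)  # train() would no longer need to mutate data
```
)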