Dean Pleban committed
Commit 2275c88
2 Parent(s): 4c15c2c 6880b4f

Merge branch 'epoch_testing_fixed' of OperationSavta/SavtaDepth into master

Notebooks/SavtaDepth_Colab.ipynb CHANGED
The diff for this file is too large to render. See raw diff
 
Notebooks/SavtaDepth_sanity_check.ipynb DELETED
The diff for this file is too large to render. See raw diff
 
dvc.lock CHANGED
@@ -23,8 +23,8 @@ train:
       md5: c94ea029ed76ca94bb1ad4c1655e5e68
       size: 1916
     - path: src/code/params.yml
-      md5: 2263ca2167c1bb4b0f53a9aedb5f238e
-      size: 217
+      md5: 88b982495a09b6d9355903e31e0b3b3f
+      size: 218
     - path: src/code/training.py
       md5: e3dff7f4b59e4ebf818d7631d3e6803a
       size: 1683
@@ -34,14 +34,14 @@ train:
       nfiles: 1590
     outs:
     - path: logs/train_metrics.csv
-      md5: 437a06e6c6c5b4f6eec5e546c1ce6930
-      size: 103916
+      md5: 15b14c6b8a3c310a7149b3f1bac5d86f
+      size: 5920790
     - path: logs/train_params.yml
-      md5: e06e92ac0f3ac1d367c22a10c28cccf9
-      size: 886
+      md5: 4d148d75cab3dbaa91ec5fccb3382541
+      size: 887
     - path: src/models/
-      md5: fab42526c433987e0e6370db31a1869d.dir
-      size: 494927196
+      md5: 8586da76f372efa83d832a9d0e664817.dir
+      size: 494927324
       nfiles: 1
 eval:
   cmd: python3 src/code/eval.py src/data/processed/test
@@ -50,22 +50,26 @@ eval:
       md5: c94ea029ed76ca94bb1ad4c1655e5e68
       size: 1916
     - path: src/code/eval.py
-      md5: fcc66ed80bb4466ab0438f556acd125c
-      size: 1775
+      md5: 9ea6a6624fa14f15b4d51f9139395663
+      size: 1893
     - path: src/code/eval_metric_calculation.py
       md5: 2fc866e1107042a996087d5716d44bf0
       size: 2999
     - path: src/code/params.yml
-      md5: 2263ca2167c1bb4b0f53a9aedb5f238e
-      size: 217
+      md5: 88b982495a09b6d9355903e31e0b3b3f
+      size: 218
     - path: src/data/processed/test
       md5: bcccd66f3f561b53ba97c89a558c08a0.dir
       size: 88596370
       nfiles: 1308
     - path: src/models/model.pth
-      md5: 2fd77305fd779eefd11e307ee3f201d7
-      size: 494927196
+      md5: f421fb113498c7186fb734928484e013
+      size: 494927324
   outs:
     - path: logs/test_metrics.csv
-      md5: 0add355c58eb4dfa1ae7e28e47750d33
+      md5: ee70c01208cdd018a567debd0abb1643
       size: 340
+    - path: src/eval/examples/
+      md5: 70fdd803300cbdc6dd76dec2148e0e0c.dir
+      size: 425678
+      nfiles: 10
dvc.yaml CHANGED
@@ -31,6 +31,8 @@ stages:
     - src/code/eval.py
     - src/models/model.pth
     - src/data/processed/test
+    outs:
+    - src/eval/examples/
     metrics:
     - logs/test_metrics.csv:
         cache: false
logs/test_metrics.csv CHANGED
@@ -1,10 +1,10 @@
 Name,Value,Timestamp,Step
-"a1",0.056999333,1613824849186,1
-"a2",0.118539445,1613824849186,1
-"a3",0.19929159,1613824849186,1
-"abs_rel",2.5860002,1613824849186,1
-"sq_rel",15.912783,1613824849186,1
-"rmse",5.257741,1613824849186,1
-"rmse_log",1.2291939,1613824849186,1
-"log10",0.49469143,1613824849186,1
-"silog",43.5198,1613824849186,1
+"a1",0.6736493,1615292013951,1
+"a2",0.9112526,1615292013951,1
+"a3",0.975165,1615292013951,1
+"abs_rel",0.21377242,1615292013951,1
+"sq_rel",0.18530104,1615292013951,1
+"rmse",0.6518149,1615292013951,1
+"rmse_log",0.24310246,1615292013951,1
+"log10",0.08433308,1615292013951,1
+"silog",20.038778,1615292013951,1
logs/train_metrics.csv CHANGED
The diff for this file is too large to render. See raw diff
 
logs/train_params.yml CHANGED
@@ -1,5 +1,5 @@
 DAGsHubLogger: true
-Learner: <fastai.learner.Learner object at 0x7f051ecfcac8>
+Learner: <fastai.learner.Learner object at 0x7fcf56717fd0>
 ParamScheduler: true
 ProgressCallback: true
 Recorder: {add_time: true, train_metrics: false, valid_metrics: true}
@@ -14,8 +14,8 @@ dls.after_batch: "Pipeline: IntToFloatTensor -- {'div': 255.0, 'div_mask': 1} ->
   \ [[0.2250]]]], device='cuda:0'), 'axes': (0, 2, 3)}"
 dls.after_item: 'Pipeline: ToTensor'
 dls.before_batch: 'Pipeline: '
-frozen: true
-frozen idx: 2
+frozen: false
+frozen idx: 0
 input 1 dim 1: 4
 input 1 dim 2: 3
 input 1 dim 3: 480
requirements.txt CHANGED
@@ -6,5 +6,5 @@ opencv-python==4.4.0.42
 tqdm==4.52.0
 numpy==1.19.4
 scikit-learn==0.23.2
-dagshub==0.1.5
+dagshub==0.1.6
 tables==3.6.1
src/code/eval.py CHANGED
@@ -2,7 +2,7 @@ import sys
 import yaml
 import torch
 from torchvision import transforms
-from fastai.vision.all import unet_learner, Path, resnet34, MSELossFlat, get_files, L, tuplify
+from fastai.vision.all import unet_learner, Path, resnet34, MSELossFlat, get_files, L, PILImageBW
 from custom_data_loading import create_data
 from eval_metric_calculation import compute_eval_metrics
 from dagshub import dagshub_logger
@@ -34,10 +34,14 @@ if __name__ == "__main__":
     filenames = get_files(Path(data_path), extensions='.jpg')
     test_files = L([Path(i) for i in filenames])
 
-    for sample in tqdm(test_files.items, desc="Predicting on test images", total=len(test_files.items)):
+    for i, sample in tqdm(enumerate(test_files.items),
+                          desc="Predicting on test images",
+                          total=len(test_files.items)):
         pred = learner.predict(sample)[0]
-        pred = transforms.ToPILImage()(pred[:, :, :].type(torch.FloatTensor)).convert('L')
+        pred = PILImageBW.create(pred).convert('L')
         pred.save("src/eval/" + str(sample.stem) + "_pred.png")
+        if i < 10:
+            pred.save("src/eval/examples/" + str(sample.stem) + "_pred.png")
 
     print("Calculating metrics...")
     metrics = compute_eval_metrics(test_files)
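
The change above swaps torchvision's ToPILImage for fastai's PILImageBW when turning each depth prediction into a grayscale PNG, and copies the first 10 predictions into src/eval/examples/, the new output tracked in dvc.yaml. A minimal, self-contained sketch of that conversion pattern, assuming a 2-D uint8 array stands in for the prediction tensor returned by learner.predict:

# Minimal sketch of the PILImageBW conversion used in eval.py.
# The random array below is a hypothetical stand-in for learner.predict(sample)[0].
import numpy as np
from fastai.vision.all import PILImageBW

pred = (np.random.rand(480, 640) * 255).astype(np.uint8)   # fake depth map
img = PILImageBW.create(pred).convert('L')                  # -> single-channel PIL image
img.save("example_pred.png")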
src/code/params.yml CHANGED
@@ -4,7 +4,7 @@ batch_size: 4
 num_workers: 0
 weight_decay: 1e-2
 learning_rate: 1e-3
-epochs: 1
+epochs: 50
 num_outs: 3
 source_dir: src
 model_dir: models
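
params.yml is plain YAML, so the bumped epoch count can be read with the yaml package that src/code/eval.py already imports. A minimal sketch, assuming only the path and keys visible in this diff:

# Minimal sketch: load the hyperparameters changed in this commit.
import yaml

with open("src/code/params.yml") as f:
    params = yaml.safe_load(f)

print(params["epochs"])      # 50 after this commit (was 1)
print(params["batch_size"])  # 4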