Dean committed
Commit dc053a0
Parent: 4c15c2c

Fixed a problem with eval; the metrics now make sense. This is a run with 1 epoch of training.

Files changed (4):
  1. dvc.lock +8 -4
  2. dvc.yaml +2 -0
  3. logs/test_metrics.csv +9 -9
  4. src/code/eval.py +7 -3
dvc.lock CHANGED
@@ -50,8 +50,8 @@ eval:
       md5: c94ea029ed76ca94bb1ad4c1655e5e68
       size: 1916
   - path: src/code/eval.py
-      md5: fcc66ed80bb4466ab0438f556acd125c
-      size: 1775
+      md5: 9ea6a6624fa14f15b4d51f9139395663
+      size: 1893
   - path: src/code/eval_metric_calculation.py
       md5: 2fc866e1107042a996087d5716d44bf0
       size: 2999
@@ -67,5 +67,9 @@ eval:
       size: 494927196
   outs:
   - path: logs/test_metrics.csv
-      md5: 0add355c58eb4dfa1ae7e28e47750d33
-      size: 340
+      md5: 80d51ef1a70b1314947919a5c37f3220
+      size: 341
+  - path: src/eval/examples/
+      md5: cc137b33b5f5930e304b0b65f4b546ca.dir
+      size: 759235
+      nfiles: 10
dvc.yaml CHANGED
@@ -31,6 +31,8 @@ stages:
     - src/code/eval.py
     - src/models/model.pth
     - src/data/processed/test
+    outs:
+    - src/eval/examples/
     metrics:
     - logs/test_metrics.csv:
         cache: false
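
The new outs entry registers the saved example predictions as a tracked output of the eval stage. A minimal sketch, not part of this commit and assuming PyYAML is available with the repository root as the working directory, for checking that the stage now declares the directory:

import yaml

# Hypothetical sanity check: load dvc.yaml and confirm the eval stage
# lists src/eval/examples/ among its outputs.
with open("dvc.yaml") as f:
    pipeline = yaml.safe_load(f)

eval_outs = pipeline["stages"]["eval"].get("outs", [])
assert "src/eval/examples/" in eval_outs, "examples directory is not a tracked output"
print("eval stage outs:", eval_outs)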
logs/test_metrics.csv CHANGED
@@ -1,10 +1,10 @@
 Name,Value,Timestamp,Step
-"a1",0.056999333,1613824849186,1
-"a2",0.118539445,1613824849186,1
-"a3",0.19929159,1613824849186,1
-"abs_rel",2.5860002,1613824849186,1
-"sq_rel",15.912783,1613824849186,1
-"rmse",5.257741,1613824849186,1
-"rmse_log",1.2291939,1613824849186,1
-"log10",0.49469143,1613824849186,1
-"silog",43.5198,1613824849186,1
+"a1",0.5386207,1615290337662,1
+"a2",0.8303751,1615290337662,1
+"a3",0.93854135,1615290337662,1
+"abs_rel",0.29692116,1615290337662,1
+"sq_rel",0.32333422,1615290337662,1
+"rmse",0.848144,1615290337662,1
+"rmse_log",0.32479692,1615290337662,1
+"log10",0.11296987,1615290337662,1
+"silog",27.511873,1615290337662,1
src/code/eval.py CHANGED
@@ -2,7 +2,7 @@ import sys
 import yaml
 import torch
 from torchvision import transforms
-from fastai.vision.all import unet_learner, Path, resnet34, MSELossFlat, get_files, L, tuplify
+from fastai.vision.all import unet_learner, Path, resnet34, MSELossFlat, get_files, L, PILImageBW
 from custom_data_loading import create_data
 from eval_metric_calculation import compute_eval_metrics
 from dagshub import dagshub_logger
@@ -34,10 +34,14 @@ if __name__ == "__main__":
     filenames = get_files(Path(data_path), extensions='.jpg')
     test_files = L([Path(i) for i in filenames])
 
-    for sample in tqdm(test_files.items, desc="Predicting on test images", total=len(test_files.items)):
+    for i, sample in tqdm(enumerate(test_files.items),
+                          desc="Predicting on test images",
+                          total=len(test_files.items)):
         pred = learner.predict(sample)[0]
-        pred = transforms.ToPILImage()(pred[:, :, :].type(torch.FloatTensor)).convert('L')
+        pred = PILImageBW.create(pred).convert('L')
         pred.save("src/eval/" + str(sample.stem) + "_pred.png")
+        if i < 10:
+            pred.save("src/eval/examples/" + str(sample.stem) + "_pred.png")
 
     print("Calculating metrics...")
     metrics = compute_eval_metrics(test_files)
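
One note on the new loop: the first 10 predictions are also written to src/eval/examples/, which matches the nfiles: 10 recorded in dvc.lock. The diff does not show where that directory is created; if it is not created elsewhere, a small guard such as the following hypothetical snippet, not part of this commit, avoids a FileNotFoundError on the first save:

import os

# Hypothetical guard: ensure the tracked examples directory exists
# before the prediction loop writes the first example image into it.
os.makedirs("src/eval/examples", exist_ok=True)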