Nadine Rueegg committed
Commit 7629b39 • 1 Parent(s): 7fd7869

initial commit for barc

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. checkpoint/barc_complete/model_best.pth.tar +3 -0
  2. checkpoint/barc_normflow_pret/rgbddog_v3_model.pt +3 -0
  3. data/breed_data/NIHMS866262-supplement-2.xlsx +3 -0
  4. data/breed_data/complete_abbrev_dict_v2.pkl +3 -0
  5. data/breed_data/complete_summary_breeds_v2.pkl +3 -0
  6. data/smal_data/mean_dog_bone_lengths.txt +34 -0
  7. data/smal_data/my_smpl_SMBLD_nbj_v3.pkl +3 -0
  8. data/smal_data/my_smpl_data_SMBLD_v3.pkl +3 -0
  9. data/smal_data/symmetry_inds.json +3897 -0
  10. data/statistics/statistics_modified_v1.json +615 -0
  11. datasets/test_image_crops/201030094143-stock-rhodesian-ridgeback-super-tease.jpg +0 -0
  12. datasets/test_image_crops/Akita-standing-outdoors-in-the-summer-400x267.jpg +0 -0
  13. datasets/test_image_crops/image_n02089078-black-and-tan_coonhound_n02089078_3810.png +0 -0
  14. gradio_demo/barc_demo_v3.py +268 -0
  15. src/bps_2d/bps_for_segmentation.py +114 -0
  16. src/combined_model/loss_image_to_3d_withbreedrel.py +277 -0
  17. src/combined_model/model_shape_v7.py +500 -0
  18. src/combined_model/train_main_image_to_3d_withbreedrel.py +470 -0
  19. src/configs/SMAL_configs.py +165 -0
  20. src/configs/anipose_data_info.py +74 -0
  21. src/configs/barc_cfg_defaults.py +111 -0
  22. src/configs/barc_loss_weights.json +30 -0
  23. src/configs/data_info.py +115 -0
  24. src/configs/dataset_path_configs.py +21 -0
  25. src/configs/dog_breeds/dog_breed_class.py +170 -0
  26. src/lifting_to_3d/inn_model_for_shape.py +61 -0
  27. src/lifting_to_3d/linear_model.py +297 -0
  28. src/lifting_to_3d/utils/geometry_utils.py +236 -0
  29. src/metrics/metrics.py +74 -0
  30. src/priors/normalizing_flow_prior/normalizing_flow_prior.py +115 -0
  31. src/priors/shape_prior.py +40 -0
  32. src/smal_pytorch/renderer/differentiable_renderer.py +280 -0
  33. src/smal_pytorch/smal_model/batch_lbs.py +295 -0
  34. src/smal_pytorch/smal_model/smal_basics.py +82 -0
  35. src/smal_pytorch/smal_model/smal_torch_new.py +313 -0
  36. src/smal_pytorch/utils.py +13 -0
  37. src/stacked_hourglass/__init__.py +2 -0
  38. src/stacked_hourglass/datasets/__init__.py +0 -0
  39. src/stacked_hourglass/datasets/imgcrops.py +77 -0
  40. src/stacked_hourglass/datasets/imgcropslist.py +95 -0
  41. src/stacked_hourglass/datasets/samplers/custom_pair_samplers.py +171 -0
  42. src/stacked_hourglass/datasets/stanext24.py +301 -0
  43. src/stacked_hourglass/datasets/utils_stanext.py +114 -0
  44. src/stacked_hourglass/model.py +308 -0
  45. src/stacked_hourglass/predictor.py +119 -0
  46. src/stacked_hourglass/utils/__init__.py +0 -0
  47. src/stacked_hourglass/utils/evaluation.py +188 -0
  48. src/stacked_hourglass/utils/finetune.py +39 -0
  49. src/stacked_hourglass/utils/imfit.py +144 -0
  50. src/stacked_hourglass/utils/imutils.py +125 -0
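The two checkpoints and the binary data files above are tracked with Git LFS, so the diffs below show three-line pointer files rather than the binaries themselves. If this repository lives on the Hugging Face Hub, a single LFS-backed file can be resolved programmatically; the `repo_id` below is a placeholder, not taken from this page:

```python
from huggingface_hub import hf_hub_download

# Placeholder repo id: substitute the actual <namespace>/<name> of this repo.
ckpt_path = hf_hub_download(
    repo_id="user/barc",
    filename="checkpoint/barc_complete/model_best.pth.tar",
)
print(ckpt_path)  # local cache path of the resolved binary, not the LFS pointer
```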
checkpoint/barc_complete/model_best.pth.tar ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0834c7f6a298a707e748da7185bd52a318697a34d7d0462e86cf57e287fa5da3
+ size 549078471
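Each LFS pointer records the spec version, a `sha256` object id, and the payload size in bytes. A minimal sketch of validating a downloaded blob against such a pointer (file paths are illustrative):

```python
import hashlib

def parse_lfs_pointer(path: str) -> dict:
    """Read a Git LFS pointer file into a dict of its key/value fields."""
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

def verify_blob(pointer_path: str, blob_path: str) -> bool:
    """Check a blob's sha256 and size against what the pointer promises."""
    ptr = parse_lfs_pointer(pointer_path)
    expected_oid = ptr["oid"].removeprefix("sha256:")
    expected_size = int(ptr["size"])
    h, size = hashlib.sha256(), 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
            size += len(chunk)
    return h.hexdigest() == expected_oid and size == expected_size
```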
checkpoint/barc_normflow_pret/rgbddog_v3_model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ff03508f6b9431da1c224697ce1c68cab000758215c4a4766e136c28f828f2d
+ size 1725484
data/breed_data/NIHMS866262-supplement-2.xlsx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd6301ec254452ecb86df745220bef98b69d59794429c5cb452b03bb76e17eae
+ size 94169
data/breed_data/complete_abbrev_dict_v2.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2354d2c7e3b2f7ee88f41234e138b7828d58fa6618c0ed0d0d4b12febaee8801
+ size 26517
data/breed_data/complete_summary_breeds_v2.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:95461e44d7a6924e1d9879711c865177ac7f15faa1ffb932cb42995c8eae3412
+ size 89004
data/smal_data/mean_dog_bone_lengths.txt ADDED
@@ -0,0 +1,34 @@
+ 0.0
+ 0.09044851362705231
+ 0.1525898575782776
+ 0.08656660467386246
+ 0.08330804109573364
+ 0.17591887712478638
+ 0.1955687403678894
+ 0.1663869321346283
+ 0.20741023123264313
+ 0.10695090889930725
+ 0.1955687403678894
+ 0.1663869321346283
+ 0.20741020143032074
+ 0.10695091634988785
+ 0.19678470492362976
+ 0.135447695851326
+ 0.10385762155056
+ 0.1951410472393036
+ 0.22369971871376038
+ 0.14296436309814453
+ 0.10385762155056
+ 0.1951410472393036
+ 0.22369973361492157
+ 0.14296436309814453
+ 0.11435563117265701
+ 0.1225045919418335
+ 0.055157795548439026
+ 0.07148551940917969
+ 0.0759430006146431
+ 0.09476413577795029
+ 0.0287716593593359
+ 0.11548781394958496
+ 0.15003003180027008
+ 0.15003003180027008
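The bone-length file is a bare column of 34 floats, one mean length per bone of the dog skeleton (the first entry, the root, is 0.0). A small sketch of loading it; how BARC consumes these values is not shown on this page, so the ratio helper is an assumption:

```python
import numpy as np

# 34 mean bone lengths, one per skeleton bone; entry 0 is the root (0.0).
mean_lengths = np.loadtxt("data/smal_data/mean_dog_bone_lengths.txt")
assert mean_lengths.shape == (34,)

def bone_length_ratios(pred_lengths: np.ndarray, eps: float = 1e-8) -> np.ndarray:
    """Predicted-to-mean length ratios, skipping the zero-length root bone."""
    return pred_lengths[1:] / (mean_lengths[1:] + eps)
```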
data/smal_data/my_smpl_SMBLD_nbj_v3.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf01081234c09445ede7079083727705e6c13a21a77bf97f305e4ad6527f06df
+ size 34904364
data/smal_data/my_smpl_data_SMBLD_v3.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:84ad0ef6f85d662464c4d0301adede172bb241158b1cea66a810a930a9473cc8
+ size 31841
data/smal_data/symmetry_inds.json ADDED
@@ -0,0 +1,3897 @@
[3,897 added lines: a JSON object with three vertex-index arrays describing the mesh's bilateral symmetry. "center_inds" holds 135 indices lying on the symmetry plane (0–32 plus scattered midline vertices up to 2003), "left_inds" holds the 1,877 consecutive indices 2012–3888, and "right_inds" holds the remaining 1,877 indices below 2012. See raw diff for the full listing.]
3795
+ 1905,
3796
+ 1906,
3797
+ 1907,
3798
+ 1908,
3799
+ 1909,
3800
+ 1910,
3801
+ 1911,
3802
+ 1912,
3803
+ 1913,
3804
+ 1914,
3805
+ 1915,
3806
+ 1916,
3807
+ 1917,
3808
+ 1918,
3809
+ 1920,
3810
+ 1921,
3811
+ 1922,
3812
+ 1923,
3813
+ 1924,
3814
+ 1925,
3815
+ 1926,
3816
+ 1927,
3817
+ 1928,
3818
+ 1929,
3819
+ 1930,
3820
+ 1931,
3821
+ 1932,
3822
+ 1933,
3823
+ 1934,
3824
+ 1935,
3825
+ 1936,
3826
+ 1937,
3827
+ 1938,
3828
+ 1939,
3829
+ 1940,
3830
+ 1941,
3831
+ 1942,
3832
+ 1943,
3833
+ 1944,
3834
+ 1945,
3835
+ 1946,
3836
+ 1947,
3837
+ 1948,
3838
+ 1949,
3839
+ 1950,
3840
+ 1951,
3841
+ 1952,
3842
+ 1953,
3843
+ 1954,
3844
+ 1955,
3845
+ 1956,
3846
+ 1957,
3847
+ 1958,
3848
+ 1959,
3849
+ 1962,
3850
+ 1963,
3851
+ 1964,
3852
+ 1966,
3853
+ 1968,
3854
+ 1969,
3855
+ 1970,
3856
+ 1971,
3857
+ 1972,
3858
+ 1973,
3859
+ 1974,
3860
+ 1975,
3861
+ 1976,
3862
+ 1977,
3863
+ 1978,
3864
+ 1979,
3865
+ 1980,
3866
+ 1981,
3867
+ 1982,
3868
+ 1983,
3869
+ 1984,
3870
+ 1985,
3871
+ 1986,
3872
+ 1987,
3873
+ 1988,
3874
+ 1989,
3875
+ 1990,
3876
+ 1991,
3877
+ 1992,
3878
+ 1993,
3879
+ 1994,
3880
+ 1995,
3881
+ 1996,
3882
+ 1997,
3883
+ 1998,
3884
+ 1999,
3885
+ 2000,
3886
+ 2001,
3887
+ 2002,
3888
+ 2004,
3889
+ 2005,
3890
+ 2006,
3891
+ 2007,
3892
+ 2008,
3893
+ 2009,
3894
+ 2010,
3895
+ 2011
3896
+ ]
3897
+ }
data/statistics/statistics_modified_v1.json ADDED
@@ -0,0 +1,615 @@
+ {
+ "trans_mean": [0.02, 0.0, 14.79],
+ "trans_std": [0.10, 0.10, 2.65],
+ "flength_mean": [2169.0],
+ "flength_std": [448.0],
+ "pose_mean": [
+ [[0.44, 0.0, -0.0], [0.0, 0.0, -1.0], [-0.0, 0.44, 0.0]],
+ [[0.97, -0.0, -0.08], [0.0, 0.98, 0.0], [0.08, -0.0, 0.98]],
+ [[0.98, 0.0, 0.01], [-0.0, 0.99, 0.0], [-0.01, 0.0, 0.98]],
+ [[0.98, -0.0, -0.03], [0.0, 0.99, 0.0], [0.04, -0.0, 0.98]],
+ [[0.98, 0.0, 0.02], [-0.0, 0.99, -0.0], [-0.02, -0.0, 0.98]],
+ [[0.99, 0.0, -0.0], [-0.0, 0.99, -0.0], [0.0, 0.0, 0.99]],
+ [[0.99, 0.0, 0.03], [0.0, 0.99, -0.0], [-0.03, 0.0, 0.99]],
+ [[0.95, -0.05, 0.04], [0.05, 0.98, -0.01], [-0.03, 0.01, 0.96]],
+ [[0.91, -0.01, -0.19], [-0.01, 0.98, -0.05], [0.19, 0.03, 0.91]],
+ [[0.85, -0.04, 0.23], [-0.0, 0.99, 0.07], [-0.23, -0.06, 0.85]],
+ [[0.93, 0.0, 0.16], [-0.01, 0.99, 0.01], [-0.16, -0.02, 0.93]],
+ [[0.95, 0.05, 0.03], [-0.05, 0.98, 0.02], [-0.03, -0.01, 0.96]],
+ [[0.91, 0.01, -0.19], [0.02, 0.98, 0.05], [0.2, -0.03, 0.91]],
+ [[0.84, 0.03, 0.24], [0.01, 0.99, -0.06], [-0.24, 0.07, 0.84]],
+ [[0.93, -0.0, 0.18], [0.01, 0.99, -0.01], [-0.18, 0.02, 0.93]],
+ [[0.95, -0.0, 0.01], [-0.0, 0.96, 0.0], [-0.0, -0.0, 0.99]],
+ [[0.93, 0.0, -0.11], [-0.0, 0.97, -0.0], [0.12, 0.0, 0.95]],
+ [[0.96, -0.04, -0.06], [0.03, 0.98, -0.02], [0.06, 0.01, 0.96]],
+ [[0.96, 0.05, 0.04], [-0.05, 0.98, -0.05], [-0.05, 0.05, 0.96]],
+ [[0.96, -0.0, -0.09], [-0.0, 0.99, 0.01], [0.09, -0.01, 0.96]],
+ [[0.96, 0.0, 0.06], [-0.02, 0.98, 0.05], [-0.05, -0.06, 0.96]],
+ [[0.96, 0.04, -0.07], [-0.03, 0.98, 0.02], [0.07, -0.01, 0.96]],
+ [[0.96, -0.05, 0.04], [0.04, 0.98, 0.05], [-0.05, -0.04, 0.97]],
+ [[0.96, -0.0, -0.09], [0.0, 0.99, -0.01], [0.09, 0.01, 0.96]],
+ [[0.96, -0.0, 0.06], [0.02, 0.98, -0.05], [-0.05, 0.06, 0.96]],
+ [[0.73, 0.0, -0.4], [-0.0, 0.98, 0.0], [0.39, 0.0, 0.73]],
+ [[0.95, -0.0, -0.07], [0.0, 0.99, -0.0], [0.07, 0.0, 0.95]],
+ [[0.98, 0.0, -0.09], [-0.0, 0.99, -0.0], [0.09, 0.0, 0.98]],
+ [[0.99, -0.0, 0.03], [0.0, 0.99, -0.0], [-0.03, 0.0, 0.99]],
+ [[0.96, -0.0, 0.1], [0.0, 0.98, -0.0], [-0.09, 0.0, 0.96]],
+ [[0.79, -0.01, 0.21], [0.01, 0.96, 0.0], [-0.2, 0.0, 0.82]],
+ [[0.89, -0.0, 0.07], [0.0, 0.98, 0.0], [-0.07, 0.0, 0.9]],
+ [[0.96, -0.0, 0.09], [0.0, 0.99, -0.0], [-0.1, 0.0, 0.96]],
+ [[0.93, -0.09, -0.07], [0.1, 0.93, 0.03], [0.03, -0.06, 0.95]],
+ [[0.86, 0.1, -0.37], [-0.12, 0.94, 0.01], [0.35, 0.05, 0.88]]
+ ]
+ }
datasets/test_image_crops/201030094143-stock-rhodesian-ridgeback-super-tease.jpg ADDED
datasets/test_image_crops/Akita-standing-outdoors-in-the-summer-400x267.jpg ADDED
datasets/test_image_crops/image_n02089078-black-and-tan_coonhound_n02089078_3810.png ADDED
gradio_demo/barc_demo_v3.py ADDED
@@ -0,0 +1,268 @@
+ # python gradio_demo/barc_demo_v3.py
+
+ import numpy as np
+ import os
+ import glob
+ import torch
+ from torch.utils.data import DataLoader
+ import torchvision
+ import torchvision.transforms as T
+ import cv2
+ from matplotlib import pyplot as plt
+ from PIL import Image
+
+ import gradio as gr
+
+ import sys
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../', 'src'))
+ from stacked_hourglass.datasets.imgcropslist import ImgCrops
+ from combined_model.train_main_image_to_3d_withbreedrel import do_visual_epoch
+ from combined_model.model_shape_v7 import ModelImageTo3d_withshape_withproj
+
+ from configs.barc_cfg_defaults import get_cfg_global_updated
+
+
+ def get_prediction(model, img_path_or_img, confidence=0.5):
+     """
+     see https://haochen23.github.io/2020/04/object-detection-faster-rcnn.html#.YsMCm4TP3-g
+     get_prediction
+     parameters:
+       - img_path_or_img - path of the input image, or the image itself
+       - confidence - threshold value for the prediction score
+     method:
+       - the image is loaded from the image path (if a path is given)
+       - the image is converted to an image tensor using PyTorch's transforms
+       - the image is passed through the model to get the predictions
+       - classes and box coordinates are obtained, but only predictions with
+         score > threshold are kept
+     """
+     if isinstance(img_path_or_img, str):
+         img = Image.open(img_path_or_img).convert('RGB')
+     else:
+         img = img_path_or_img
+     transform = T.Compose([T.ToTensor()])
+     img = transform(img)
+     pred = model([img])
+     # pred_class = [COCO_INSTANCE_CATEGORY_NAMES[i] for i in list(pred[0]['labels'].numpy())]
+     pred_class = list(pred[0]['labels'].numpy())
+     pred_boxes = [[(int(i[0]), int(i[1])), (int(i[2]), int(i[3]))] for i in list(pred[0]['boxes'].detach().numpy())]
+     pred_score = list(pred[0]['scores'].detach().numpy())
+     try:
+         pred_t = [pred_score.index(x) for x in pred_score if x > confidence][-1]
+         pred_boxes = pred_boxes[:pred_t+1]
+         pred_class = pred_class[:pred_t+1]
+         return pred_boxes, pred_class, pred_score
+     except IndexError:
+         print('no bounding box with a sufficiently high score was found -> working on the full image')
+         return None, None, None
+
+ def detect_object(model, img_path_or_img, confidence=0.5, rect_th=2, text_size=0.5, text_th=1):
+     """
+     see https://haochen23.github.io/2020/04/object-detection-faster-rcnn.html#.YsMCm4TP3-g
+     object_detection_api
+     parameters:
+       - img_path_or_img - path of the input image, or the image itself
+       - confidence - threshold value for the prediction score
+       - rect_th - thickness of the bounding box
+       - text_size - size of the class label text
+       - text_th - thickness of the text
+     method:
+       - the prediction is obtained from the get_prediction method
+       - for each prediction, the bounding box is drawn and the text is
+         written with opencv
+       - the final image and the first dog bounding box are returned
+     """
+     boxes, pred_cls, pred_scores = get_prediction(model, img_path_or_img, confidence)
+     if isinstance(img_path_or_img, str):
+         img = cv2.imread(img_path_or_img)
+         img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+     else:
+         img = img_path_or_img
+     bbox = None
+     if boxes is not None:
+         for i in range(len(boxes)):
+             cls = pred_cls[i]
+             if cls == 18 and bbox is None:
+                 cv2.rectangle(img, boxes[i][0], boxes[i][1], color=(0, 255, 0), thickness=rect_th)
+                 # cv2.putText(img, pred_cls[i], boxes[i][0], cv2.FONT_HERSHEY_SIMPLEX, text_size, (0,255,0), thickness=text_th)
+                 cv2.putText(img, str(pred_scores[i]), boxes[i][0], cv2.FONT_HERSHEY_SIMPLEX, text_size, (0, 255, 0), thickness=text_th)
+                 bbox = boxes[i]
+     return img, bbox
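+
+ # In torchvision's COCO instance category list, label id 18 corresponds to
+ # 'dog', which is why only boxes with cls == 18 are kept above. A minimal,
+ # illustrative usage sketch (the file name 'dog.jpg' is just a placeholder):
+ #   model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
+ #   model.eval()
+ #   img, bbox = detect_object(model, 'dog.jpg', confidence=0.5)
+ #   # bbox is [(x0, y0), (x1, y1)] of the first dog, or None if no dog was found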
+
+
+ def run_bbox_inference(input_image):
+     cfg = get_cfg_global_updated()    # load configs (cfg was previously undefined here)
+     model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
+     model.eval()
+     out_path = os.path.join(cfg.paths.ROOT_OUT_PATH, 'gradio_examples', 'test2.png')
+     if not os.path.exists(os.path.dirname(out_path)):
+         os.makedirs(os.path.dirname(out_path))
+     img, bbox = detect_object(model=model, img_path_or_img=input_image, confidence=0.5)
+     plt.imsave(out_path, img)
+     return img, bbox
+
+
+ def run_barc_inference(input_image, bbox=None):
+
+     # load configs
+     cfg = get_cfg_global_updated()
+
+     path_model_file_complete = os.path.join(cfg.paths.ROOT_CHECKPOINT_PATH, 'barc_complete', 'model_best.pth.tar')
+
+     # Select the hardware device to use for inference.
+     if torch.cuda.is_available() and cfg.device == 'cuda':
+         device = torch.device('cuda', torch.cuda.current_device())
+         # torch.backends.cudnn.benchmark = True
+     else:
+         device = torch.device('cpu')
+
+     # Disable gradient calculations.
+     torch.set_grad_enabled(False)
+
+     # prepare complete model
+     complete_model = ModelImageTo3d_withshape_withproj(
+         num_stage_comb=cfg.params.NUM_STAGE_COMB, num_stage_heads=cfg.params.NUM_STAGE_HEADS,
+         num_stage_heads_pose=cfg.params.NUM_STAGE_HEADS_POSE, trans_sep=cfg.params.TRANS_SEP,
+         arch=cfg.params.ARCH, n_joints=cfg.params.N_JOINTS, n_classes=cfg.params.N_CLASSES,
+         n_keyp=cfg.params.N_KEYP, n_bones=cfg.params.N_BONES, n_betas=cfg.params.N_BETAS, n_betas_limbs=cfg.params.N_BETAS_LIMBS,
+         n_breeds=cfg.params.N_BREEDS, n_z=cfg.params.N_Z, image_size=cfg.params.IMG_SIZE,
+         silh_no_tail=cfg.params.SILH_NO_TAIL, thr_keyp_sc=cfg.params.KP_THRESHOLD, add_z_to_3d_input=cfg.params.ADD_Z_TO_3D_INPUT,
+         n_segbps=cfg.params.N_SEGBPS, add_segbps_to_3d_input=cfg.params.ADD_SEGBPS_TO_3D_INPUT, add_partseg=cfg.params.ADD_PARTSEG, n_partseg=cfg.params.N_PARTSEG,
+         fix_flength=cfg.params.FIX_FLENGTH, structure_z_to_betas=cfg.params.STRUCTURE_Z_TO_B, structure_pose_net=cfg.params.STRUCTURE_POSE_NET,
+         nf_version=cfg.params.NF_VERSION)
+
+     # load trained model
+     print('Loading model weights from file: {}'.format(path_model_file_complete))
+     assert os.path.isfile(path_model_file_complete)
+     checkpoint_complete = torch.load(path_model_file_complete, map_location=device)    # map_location allows cpu-only inference
+     state_dict_complete = checkpoint_complete['state_dict']
+     complete_model.load_state_dict(state_dict_complete, strict=False)
+     complete_model = complete_model.to(device)
+
+     save_imgs_path = os.path.join(cfg.paths.ROOT_OUT_PATH, 'gradio_examples')
+     if not os.path.exists(save_imgs_path):
+         os.makedirs(save_imgs_path)
+
+     input_image_list = [input_image]
+     if bbox is not None:
+         input_bbox_list = [bbox]
+     else:
+         input_bbox_list = None
+     val_dataset = ImgCrops(image_list=input_image_list, bbox_list=input_bbox_list, dataset_mode='complete')
+     test_name_list = val_dataset.test_name_list
+     val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False,
+                             num_workers=0, pin_memory=True, drop_last=False)
+
+     # run visual evaluation
+     # remark: take ACC_JOINTS and DATA_INFO from StanExt as this is the training dataset
+     all_results = do_visual_epoch(val_loader, complete_model, device,
+                                   ImgCrops.DATA_INFO,
+                                   weight_dict=None,
+                                   acc_joints=ImgCrops.ACC_JOINTS,
+                                   save_imgs_path=None,    # save_imgs_path,
+                                   metrics='all',
+                                   test_name_list=test_name_list,
+                                   render_all=cfg.params.RENDER_ALL,
+                                   pck_thresh=cfg.params.PCK_THRESH,
+                                   return_results=True)
+
+     mesh = all_results[0]['mesh_posed']
+     result_path = os.path.join(save_imgs_path, test_name_list[0] + '_z')
+
+     mesh.apply_transform([[-1, 0, 0, 0],
+                           [0, -1, 0, 0],
+                           [0, 0, 1, 1],
+                           [0, 0, 0, 1]])
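+     # the 4x4 transform above is a 180-degree rotation around the z-axis (x and
+     # y are negated) combined with a translation of +1 along z; presumably this
+     # orients the exported mesh for the gradio glTF viewer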
+     mesh.export(file_obj=result_path + '.glb')
+     result_gltf = result_path + '.glb'
+     return [result_gltf, result_gltf]
+
+
+ def run_complete_inference(input_image):
+     output_interm_image, output_interm_bbox = run_bbox_inference(input_image.copy())
+     print(output_interm_bbox)
+     # output_results = run_barc_inference(input_image)
+     output_results = run_barc_inference(input_image, output_interm_bbox)    # [gltf_path, gltf_path]
+     return output_results
+
+
+ # demo = gr.Interface(run_barc_inference, gr.Image(), "image")
+ # demo = gr.Interface(run_complete_inference, gr.Image(), "image")
+
+ # see: https://huggingface.co/spaces/radames/PIFu-Clothed-Human-Digitization/blob/main/PIFu/spaces.py
+
+ description = '''
+ # BARC
+
+ #### Project Page
+ * https://barc.is.tue.mpg.de/
+
+ #### Description
+ This is a demo for BARC. While BARC is trained on image crops, this demo uses a pretrained Faster R-CNN to obtain bounding boxes for the dogs.
+ Computing your result may take a minute or two, so please be patient.
+
+ <details>
+
+ <summary>More</summary>
+
+ #### Citation
+
+ ```
+ @inproceedings{BARC:2022,
+     title = {{BARC}: Learning to Regress {3D} Dog Shape from Images by Exploiting Breed Information},
+     author = {Rueegg, Nadine and Zuffi, Silvia and Schindler, Konrad and Black, Michael J.},
+     booktitle = {Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR)},
+     year = {2022}
+ }
+ ```
+
+ </details>
+ '''
+
+ examples = sorted(glob.glob(os.path.join(os.path.dirname(__file__), '../', 'datasets', 'test_image_crops', '*.jpg')) + glob.glob(os.path.join(os.path.dirname(__file__), '../', 'datasets', 'test_image_crops', '*.png')))
+
+ demo = gr.Interface(
+     fn=run_complete_inference,
+     description=description,
+     # inputs=gr.Image(type="filepath", label="Input Image"),
+     inputs=gr.Image(label="Input Image"),
+     outputs=[
+         gr.Model3D(
+             clear_color=[0.0, 0.0, 0.0, 0.0], label="3D Model"),
+         gr.File(label="Download 3D Model")
+     ],
+     examples=examples,
+     thumbnail="barc_thumbnail.png",
+     allow_flagging="never",
+     cache_examples=True
+ )
+
+ demo.launch(share=True)
src/bps_2d/bps_for_segmentation.py ADDED
@@ -0,0 +1,114 @@
+
+ # code idea from https://github.com/sergeyprokudin/bps
+
+ import os
+ import numpy as np
+ from PIL import Image
+ import scipy
+ import scipy.spatial
+
+
+ #####################
+ QUERY_POINTS = np.asarray([30, 34, 31, 55, 29, 84, 35, 108, 34, 145, 29, 171, 27,
+     196, 29, 228, 58, 35, 61, 55, 57, 83, 56, 109, 63, 148, 58, 164, 57, 197, 60,
+     227, 81, 26, 87, 58, 85, 87, 89, 117, 86, 142, 89, 172, 84, 197, 88, 227, 113,
+     32, 116, 58, 112, 88, 118, 113, 109, 147, 114, 173, 119, 201, 113, 229, 139,
+     29, 141, 59, 142, 93, 139, 117, 146, 147, 141, 173, 142, 201, 143, 227, 170,
+     26, 173, 59, 166, 90, 174, 117, 176, 141, 169, 175, 167, 198, 172, 227, 198,
+     30, 195, 59, 204, 85, 198, 116, 195, 140, 198, 175, 194, 193, 199, 227, 221,
+     26, 223, 57, 227, 83, 227, 113, 227, 140, 226, 173, 230, 196, 228, 229]).reshape((64, 2))
+ #####################
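+
+ # The 64 query points above form a roughly regular, slightly jittered 8x8 grid
+ # over the 256x256 input crop (row/col values range from about 26 to 230); they
+ # are the fixed "basis points" of the BPS descriptor. An illustrative check of
+ # the grid structure (not part of the original pipeline):
+ #   QUERY_POINTS.shape    # (64, 2), each row is a (row, col) position
+ #   QUERY_POINTS.min()    # ~26,  QUERY_POINTS.max()    # ~230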
+
+ class SegBPS():
+
+     def __init__(self, query_points=QUERY_POINTS, size=256):
+         self.size = size
+         self.query_points = query_points
+         row, col = np.indices((self.size, self.size))
+         self.indices_rc = np.stack((row, col), axis=2)    # (256, 256, 2)
+         self.pts_aranged = np.arange(64)
+         return
+
+     def _do_kdtree(self, combined_x_y_arrays, points):
+         # see https://stackoverflow.com/questions/10818546/finding-index-of-nearest-
+         # point-in-numpy-arrays-of-x-and-y-coordinates
+         mytree = scipy.spatial.cKDTree(combined_x_y_arrays)
+         dist, indexes = mytree.query(points)
+         return indexes
+
+     def calculate_bps_points(self, seg, thr=0.5, vis=False, out_path=None):
+         # seg: input segmentation image of shape (256, 256) with values between 0 and 1
+         query_val = seg[self.query_points[:, 0], self.query_points[:, 1]]
+         pts_fg = self.pts_aranged[query_val >= thr]
+         pts_bg = self.pts_aranged[query_val < thr]
+         candidate_inds_bg = self.indices_rc[seg < thr]
+         candidate_inds_fg = self.indices_rc[seg >= thr]
+         if candidate_inds_bg.shape[0] == 0:
+             candidate_inds_bg = np.ones((1, 2)) * 128    # np.zeros((1, 2))
+         if candidate_inds_fg.shape[0] == 0:
+             candidate_inds_fg = np.ones((1, 2)) * 128    # np.zeros((1, 2))
+         # calculate nearest points
+         all_nearest_points = np.zeros((64, 2))
+         all_nearest_points[pts_fg, :] = candidate_inds_bg[self._do_kdtree(candidate_inds_bg, self.query_points[pts_fg, :]), :]
+         all_nearest_points[pts_bg, :] = candidate_inds_fg[self._do_kdtree(candidate_inds_fg, self.query_points[pts_bg, :]), :]
+         all_nearest_points_01 = all_nearest_points / 255.
+         if vis:
+             self.visualize_result(seg, all_nearest_points, out_path=out_path)
+         return all_nearest_points_01
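+
+     # Illustrative example of the descriptor semantics (a sketch, not part of
+     # the original pipeline): foreground query points are matched to their
+     # nearest background pixel and vice versa, so the descriptor encodes where
+     # the mask boundary lies relative to the fixed query grid:
+     #   seg = np.zeros((256, 256)); seg[96:160, 96:160] = 1.0
+     #   bps = SegBPS()
+     #   descriptor = bps.calculate_bps_points(seg)    # (64, 2), values in [0, 1]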
+
+     def calculate_bps_points_batch(self, seg_batch, thr=0.5, vis=False, out_path=None):
+         # seg_batch: input segmentation images of shape (bs, 256, 256) with values between 0 and 1
+         bs = seg_batch.shape[0]
+         all_nearest_points_01_batch = np.zeros((bs, self.query_points.shape[0], 2))
+         for ind in range(0, bs):
+             seg = seg_batch[ind, :, :]
+             all_nearest_points_01 = self.calculate_bps_points(seg, thr=thr, vis=vis, out_path=out_path)
+             all_nearest_points_01_batch[ind, :, :] = all_nearest_points_01
+         return all_nearest_points_01_batch
+
+     def visualize_result(self, seg, all_nearest_points, out_path=None):
+         import matplotlib as mpl
+         mpl.use('Agg')
+         import matplotlib.pyplot as plt
+         # img: (256, 256, 3)
+         img = (np.stack((seg, seg, seg), axis=2) * 155).astype(np.uint8)    # np.int is deprecated
+         if out_path is None:
+             ind_img = 0
+             out_path = '../test_img' + str(ind_img) + '.png'
+         fig, ax = plt.subplots()
+         plt.imshow(img)
+         plt.gca().set_axis_off()
+         plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
+         plt.margins(0, 0)
+         ratio_in_out = 1    # 255
+         for idx, (y, x) in enumerate(self.query_points):
+             x = int(x * ratio_in_out)
+             y = int(y * ratio_in_out)
+             plt.scatter([x], [y], marker="x", s=50)
+             x2 = int(all_nearest_points[idx, 1])
+             y2 = int(all_nearest_points[idx, 0])
+             plt.scatter([x2], [y2], marker="o", s=50)
+             plt.plot([x, x2], [y, y2])
+         plt.savefig(out_path, bbox_inches='tight', pad_inches=0)
+         plt.close()
+         return
+
+
+ if __name__ == "__main__":
+     ind_img = 2    # 4
+     path_seg_top = '...../pytorch-stacked-hourglass/results/dogs_hg8_ks_24_v1/test/'
+     path_seg = os.path.join(path_seg_top, 'seg_big_' + str(ind_img) + '.png')
+     img = np.asarray(Image.open(path_seg))
+     # min is 0.004, max is 0.9
+     # low values are background, high values are foreground
+     seg = img[:, :, 1] / 255.
+     # calculate points
+     bps = SegBPS()
+     bps.calculate_bps_points(seg, thr=0.5, vis=False, out_path=None)
src/combined_model/loss_image_to_3d_withbreedrel.py ADDED
@@ -0,0 +1,277 @@
+
+ import torch
+ import numpy as np
+ import pickle as pkl
+
+ import os
+ import sys
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'src'))
+ # from priors.pose_prior_35 import Prior
+ # from priors.tiger_pose_prior.tiger_pose_prior import GaussianMixturePrior
+ from priors.normalizing_flow_prior.normalizing_flow_prior import NormalizingFlowPrior
+ from priors.shape_prior import ShapePrior
+ from lifting_to_3d.utils.geometry_utils import rot6d_to_rotmat, batch_rot2aa
+ from configs.SMAL_configs import UNITY_SMAL_SHAPE_PRIOR_DOGS
+
+
+ class Loss(torch.nn.Module):
+     def __init__(self, data_info, nf_version=None):
+         super(Loss, self).__init__()
+         self.criterion_regr = torch.nn.MSELoss()    # takes the mean
+         self.criterion_class = torch.nn.CrossEntropyLoss()
+         self.data_info = data_info
+         self.register_buffer('keypoint_weights', torch.tensor(data_info.keypoint_weights)[None, :])
+         self.l_anchor = None
+         self.l_pos = None
+         self.l_neg = None
+
+         if nf_version is not None:
+             self.normalizing_flow_pose_prior = NormalizingFlowPrior(nf_version=nf_version)
+         self.shape_prior = ShapePrior(UNITY_SMAL_SHAPE_PRIOR_DOGS)
+         self.criterion_triplet = torch.nn.TripletMarginLoss(margin=1)
+
+         # load 3d data for the unity dogs (an optional shape prior for 11 breeds)
+         with open(UNITY_SMAL_SHAPE_PRIOR_DOGS, 'rb') as f:
+             data = pkl.load(f)
+         dog_betas_unity = data['dogs_betas']
+         self.dog_betas_unity = {29: torch.tensor(dog_betas_unity[0, :]).float(),
+                                 91: torch.tensor(dog_betas_unity[1, :]).float(),
+                                 84: torch.tensor(0.5*dog_betas_unity[3, :] + 0.5*dog_betas_unity[14, :]).float(),
+                                 85: torch.tensor(dog_betas_unity[5, :]).float(),
+                                 28: torch.tensor(dog_betas_unity[6, :]).float(),
+                                 94: torch.tensor(dog_betas_unity[7, :]).float(),
+                                 92: torch.tensor(dog_betas_unity[8, :]).float(),
+                                 95: torch.tensor(dog_betas_unity[10, :]).float(),
+                                 20: torch.tensor(dog_betas_unity[11, :]).float(),
+                                 83: torch.tensor(dog_betas_unity[12, :]).float(),
+                                 99: torch.tensor(dog_betas_unity[16, :]).float()}
+
+     def prepare_anchor_pos_neg(self, batch_size, device):
+         l0 = np.arange(0, batch_size, 2)
+         l_anchor = []
+         l_pos = []
+         l_neg = []
+         for ind in l0:
+             xx = set(np.arange(0, batch_size))
+             xx.discard(ind)
+             xx.discard(ind+1)
+             for ind2 in xx:
+                 if ind2 % 2 == 0:
+                     l_anchor.append(ind)
+                     l_pos.append(ind + 1)
+                 else:
+                     l_anchor.append(ind + 1)
+                     l_pos.append(ind)
+                 l_neg.append(ind2)
+         self.l_anchor = torch.Tensor(l_anchor).to(torch.int64).to(device)
+         self.l_pos = torch.Tensor(l_pos).to(torch.int64).to(device)
+         self.l_neg = torch.Tensor(l_neg).to(torch.int64).to(device)
+         return
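+
+     # Illustrative example (assumes the sampler delivers same-breed pairs at
+     # batch positions (0, 1), (2, 3), ...): for batch_size=4 the lists built
+     # above come out as
+     #   l_anchor = [0, 1, 2, 3]
+     #   l_pos    = [1, 0, 3, 2]    (the same-breed partner of each anchor)
+     #   l_neg    = [2, 3, 0, 1]    (a sample from a different pair)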
+
+
+     def forward(self, output_reproj, target_dict, weight_dict=None):
+         # output_reproj: ['vertices_smal', 'keyp_3d', 'keyp_2d', 'silh_image']
+         # target_dict: ['index', 'center', 'scale', 'pts', 'tpts', 'target_weight']
+         batch_size = output_reproj['keyp_2d'].shape[0]
+
+         # loss on reprojected keypoints
+         output_kp_resh = (output_reproj['keyp_2d']).reshape((-1, 2))
+         target_kp_resh = (target_dict['tpts'][:, :, :2] / 64. * (256. - 1)).reshape((-1, 2))
+         weights_resh = target_dict['tpts'][:, :, 2].reshape((-1))
+         keyp_w_resh = self.keypoint_weights.repeat((batch_size, 1)).reshape((-1))
+         loss_keyp = ((((output_kp_resh - target_kp_resh)[weights_resh>0]**2).sum(axis=1).sqrt()*weights_resh[weights_resh>0])*keyp_w_resh[weights_resh>0]).sum() / \
+             max((weights_resh[weights_resh>0]*keyp_w_resh[weights_resh>0]).sum(), 1e-5)
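+         # i.e. loss_keyp is the visibility- and keypoint-weighted mean of the
+         # per-keypoint euclidean reprojection distances, with the 64x64 heatmap
+         # target coordinates rescaled to the 256x256 image resolution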
+
+         # loss on reprojected silhouette
+         assert output_reproj['silh'].shape == (target_dict['silh'][:, None, :, :]).shape
+         silh_loss_type = 'default'
+         if silh_loss_type == 'default':
+             with torch.no_grad():
+                 thr_silh = 20
+                 diff = torch.norm(output_kp_resh - target_kp_resh, dim=1)
+                 diff_x = diff.reshape((batch_size, -1))
+                 weights_resh_x = weights_resh.reshape((batch_size, -1))
+                 unweighted_kp_mean_dist = (diff_x * weights_resh_x).sum(dim=1) / ((weights_resh_x).sum(dim=1)+1e-6)
+             loss_silh_bs = ((output_reproj['silh'] - target_dict['silh'][:, None, :, :]) ** 2).sum(axis=3).sum(axis=2).sum(axis=1) / (output_reproj['silh'].shape[2]*output_reproj['silh'].shape[3])
+             loss_silh = loss_silh_bs[unweighted_kp_mean_dist<thr_silh].sum() / batch_size
+         else:
+             print('silh_loss_type: ' + silh_loss_type)
+             raise ValueError
+
+         # shape regularization
+         #   'smal': loss on betas (pca coefficients), betas should be close to 0
+         #   'limbs...': loss on selected betas_limbs
+         loss_shape_weighted_list = [torch.zeros((1)).mean().to(output_reproj['keyp_2d'].device)]
+         for ind_sp, sp in enumerate(weight_dict['shape_options']):
+             weight_sp = weight_dict['shape'][ind_sp]
+             # self.logscale_part_list = ['legs_l', 'legs_f', 'tail_l', 'tail_f', 'ears_y', 'ears_l', 'head_l']
+             if sp == 'smal':
+                 loss_shape_tmp = self.shape_prior(output_reproj['betas'])
+             elif sp == 'limbs':
+                 loss_shape_tmp = torch.mean((output_reproj['betas_limbs'])**2)
+             elif sp == 'limbs7':
+                 limb_coeffs_list = [0.01, 1, 0.1, 1, 1, 0.1, 2]
+                 limb_coeffs = torch.tensor(limb_coeffs_list).to(torch.float32).to(target_dict['tpts'].device)
+                 loss_shape_tmp = torch.mean((output_reproj['betas_limbs'] * limb_coeffs[None, :])**2)
+             else:
+                 raise NotImplementedError
+             loss_shape_weighted_list.append(weight_sp * loss_shape_tmp)
+         loss_shape_weighted = torch.stack((loss_shape_weighted_list)).sum()
+
+         # 3D loss for dogs for which we have a unity model or toy figure
+         loss_models3d = torch.zeros((1)).mean().to(output_reproj['betas'].device)
+         if 'models3d' in weight_dict.keys():
+             if weight_dict['models3d'] > 0:
+                 for ind_dog in range(target_dict['breed_index'].shape[0]):
+                     breed_index = int(target_dict['breed_index'][ind_dog].detach().cpu().numpy())    # np.asscalar is deprecated
+                     if breed_index in self.dog_betas_unity.keys():
+                         betas_target = self.dog_betas_unity[breed_index][:output_reproj['betas'].shape[1]].to(output_reproj['betas'].device)
+                         betas_output = output_reproj['betas'][ind_dog, :]
+                         betas_limbs_output = output_reproj['betas_limbs'][ind_dog, :]
+                         loss_models3d += ((betas_limbs_output**2).sum() + ((betas_output-betas_target)**2).sum()) / (output_reproj['betas'].shape[1] + output_reproj['betas_limbs'].shape[1])
+         else:
+             weight_dict['models3d'] = 0
+
+         # shape regularization loss on shapedirs
+         # -> in the current version the shapedirs are kept fixed, so we don't need those losses
+         if weight_dict['shapedirs'] > 0:
+             raise NotImplementedError
+         else:
+             loss_shapedirs = torch.zeros((1)).mean().to(output_reproj['betas'].device)
+
+         # prior on back joints (not used in the cvpr 2022 paper)
+         # -> elementwise MSE loss on all 6 coefficients of the 6d rotation representation
+         if 'pose_0' in weight_dict.keys():
+             if weight_dict['pose_0'] > 0:
+                 pred_pose_rot6d = output_reproj['pose_rot6d']
+                 w_rj_np = np.zeros((pred_pose_rot6d.shape[1]))
+                 w_rj_np[[2, 3, 4, 5]] = 1.0    # back
+                 w_rj = torch.tensor(w_rj_np).to(torch.float32).to(pred_pose_rot6d.device)
+                 zero_rot = torch.tensor([1, 0, 0, 1, 0, 0]).to(pred_pose_rot6d.device).to(torch.float32)[None, None, :].repeat((batch_size, pred_pose_rot6d.shape[1], 1))
+                 loss_pose = self.criterion_regr(pred_pose_rot6d*w_rj[None, :, None], zero_rot*w_rj[None, :, None])
+             else:
+                 loss_pose = torch.zeros((1)).mean()
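+
+         # note: [1, 0, 0, 1, 0, 0] appears to be the 6d representation of the
+         # identity rotation in the convention used here (the first two columns
+         # of the 3x3 identity, flattened row-wise), so this term pulls the
+         # selected back joints towards a zero rotation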
+
+         # pose prior
+         # -> we did experiment with different pose priors, for example:
+         #    * similar to SMALify (https://github.com/benjiebob/SMALify/blob/master/smal_fitter/smal_fitter.py,
+         #      https://github.com/benjiebob/SMALify/blob/master/smal_fitter/priors/pose_prior_35.py)
+         #    * vae
+         #    * normalizing flow pose prior
+         # -> our cvpr 2022 paper uses the normalizing flow pose prior as implemented below
+         pred_pose = None
+         if 'poseprior' in weight_dict.keys():
+             if weight_dict['poseprior'] > 0:
+                 pred_pose_rot6d = output_reproj['pose_rot6d']
+                 pred_pose = rot6d_to_rotmat(pred_pose_rot6d.reshape((-1, 6))).reshape((batch_size, -1, 3, 3))
+                 if 'normalizing_flow_tiger' in weight_dict['poseprior_options']:
+                     if output_reproj['normflow_z'] is not None:
+                         loss_poseprior = self.normalizing_flow_pose_prior.calculate_loss_from_z(output_reproj['normflow_z'], type='square')
+                     else:
+                         loss_poseprior = self.normalizing_flow_pose_prior.calculate_loss(pred_pose_rot6d, type='square')
+                 elif 'normalizing_flow_tiger_logprob' in weight_dict['poseprior_options']:
+                     if output_reproj['normflow_z'] is not None:
+                         loss_poseprior = self.normalizing_flow_pose_prior.calculate_loss_from_z(output_reproj['normflow_z'], type='neg_log_prob')
+                     else:
+                         loss_poseprior = self.normalizing_flow_pose_prior.calculate_loss(pred_pose_rot6d, type='neg_log_prob')
+                 else:
+                     raise NotImplementedError
+             else:
+                 loss_poseprior = torch.zeros((1)).mean()
+         else:
+             weight_dict['poseprior'] = 0
+             loss_poseprior = torch.zeros((1)).mean()
+
+         # add a prior which penalizes side-movement angles for legs
+         if 'poselegssidemovement' in weight_dict.keys():
+             use_pose_legs_side_loss = True
+         else:
+             use_pose_legs_side_loss = False
+         if use_pose_legs_side_loss:
+             if pred_pose is None:    # pred_pose is only set in the pose prior branch above
+                 pred_pose = rot6d_to_rotmat(output_reproj['pose_rot6d'].reshape((-1, 6))).reshape((batch_size, -1, 3, 3))
+             leg_indices_right = np.asarray([7, 8, 9, 10, 17, 18, 19, 20])    # front, back
+             leg_indices_left = np.asarray([11, 12, 13, 14, 21, 22, 23, 24])    # front, back
+             vec = torch.zeros((3, 1)).to(device=pred_pose.device, dtype=pred_pose.dtype)
+             vec[2] = -1
+             x0_rotmat = pred_pose
+             x0_rotmat_legs_left = x0_rotmat[:, leg_indices_left, :, :]
+             x0_rotmat_legs_right = x0_rotmat[:, leg_indices_right, :, :]
+             x0_legs_left = x0_rotmat_legs_left.reshape((-1, 3, 3))@vec
+             x0_legs_right = x0_rotmat_legs_right.reshape((-1, 3, 3))@vec
+             # use the component of the rotated (0, 0, -1) vector which points to the side
+             loss_poselegssidemovement = (x0_legs_left[:, 1]**2).mean() + (x0_legs_right[:, 1]**2).mean()
+         else:
+             loss_poselegssidemovement = torch.zeros((1)).mean()
+             weight_dict['poselegssidemovement'] = 0
+
+         # dog breed classification loss
+         dog_breed_gt = target_dict['breed_index']
+         dog_breed_pred = output_reproj['dog_breed']
+         loss_class = self.criterion_class(dog_breed_pred, dog_breed_gt)
+
+         # dog breed relationship loss
+         # -> we did experiment with many other options, but none was significantly better
+         if '4' in weight_dict['breed_options']:    # we have pairs of dogs of the same breed
+             assert weight_dict['breed'] > 0
+             z = output_reproj['z']
+             # go through all pairs and compare them to each other sample
+             if self.l_anchor is None:
+                 self.prepare_anchor_pos_neg(batch_size, z.device)
+             anchor = torch.index_select(z, 0, self.l_anchor)
+             positive = torch.index_select(z, 0, self.l_pos)
+             negative = torch.index_select(z, 0, self.l_neg)
+             loss_breed = self.criterion_triplet(anchor, positive, negative)
+         else:
+             loss_breed = torch.zeros((1)).mean()
+
+         # regularization for the focal length
+         loss_flength_near_mean = torch.mean(output_reproj['flength']**2)
+         loss_flength = loss_flength_near_mean
+
+         # bodypart segmentation loss
+         if 'partseg' in weight_dict.keys():
+             if weight_dict['partseg'] > 0:
+                 raise NotImplementedError
+             else:
+                 loss_partseg = torch.zeros((1)).mean()
+         else:
+             weight_dict['partseg'] = 0
+             loss_partseg = torch.zeros((1)).mean()
+
+         # weight and combine losses
+         loss_keyp_weighted = loss_keyp * weight_dict['keyp']
+         loss_silh_weighted = loss_silh * weight_dict['silh']
+         loss_shapedirs_weighted = loss_shapedirs * weight_dict['shapedirs']
+         loss_pose_weighted = loss_pose * weight_dict['pose_0']
+         loss_class_weighted = loss_class * weight_dict['class']
+         loss_breed_weighted = loss_breed * weight_dict['breed']
+         loss_flength_weighted = loss_flength * weight_dict['flength']
+         loss_poseprior_weighted = loss_poseprior * weight_dict['poseprior']
+         loss_partseg_weighted = loss_partseg * weight_dict['partseg']
+         loss_models3d_weighted = loss_models3d * weight_dict['models3d']
+         loss_poselegssidemovement_weighted = loss_poselegssidemovement * weight_dict['poselegssidemovement']
+
+         ####################################################################################################
+         loss = loss_keyp_weighted + loss_silh_weighted + loss_shape_weighted + loss_pose_weighted + loss_class_weighted + \
+             loss_shapedirs_weighted + loss_breed_weighted + loss_flength_weighted + loss_poseprior_weighted + \
+             loss_partseg_weighted + loss_models3d_weighted + loss_poselegssidemovement_weighted
+         ####################################################################################################
+
+         loss_dict = {'loss': loss.item(),
+                      'loss_keyp_weighted': loss_keyp_weighted.item(),
+                      'loss_silh_weighted': loss_silh_weighted.item(),
+                      'loss_shape_weighted': loss_shape_weighted.item(),
+                      'loss_shapedirs_weighted': loss_shapedirs_weighted.item(),
+                      'loss_pose0_weighted': loss_pose_weighted.item(),
+                      'loss_class_weighted': loss_class_weighted.item(),
+                      'loss_breed_weighted': loss_breed_weighted.item(),
+                      'loss_flength_weighted': loss_flength_weighted.item(),
+                      'loss_poseprior_weighted': loss_poseprior_weighted.item(),
+                      'loss_partseg_weighted': loss_partseg_weighted.item(),
+                      'loss_models3d_weighted': loss_models3d_weighted.item(),
+                      'loss_poselegssidemovement_weighted': loss_poselegssidemovement_weighted.item()}
+
+         return loss, loss_dict
src/combined_model/model_shape_v7.py ADDED
@@ -0,0 +1,500 @@
+
+ import pickle as pkl
+ import numpy as np
+ import torchvision.models as models
+ from torchvision import transforms
+ import torch
+ from torch import nn
+ from torch.nn.parameter import Parameter
+ from kornia.geometry.subpix import dsnt    # kornia 0.4.0
+
+ import os
+ import sys
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
+ from stacked_hourglass.utils.evaluation import get_preds_soft
+ from stacked_hourglass import hg1, hg2, hg8
+ from lifting_to_3d.linear_model import LinearModelComplete, LinearModel
+ from lifting_to_3d.inn_model_for_shape import INNForShape
+ from lifting_to_3d.utils.geometry_utils import rot6d_to_rotmat, rotmat_to_rot6d
+ from smal_pytorch.smal_model.smal_torch_new import SMAL
+ from smal_pytorch.renderer.differentiable_renderer import SilhRenderer
+ from bps_2d.bps_for_segmentation import SegBPS
+ from configs.SMAL_configs import UNITY_SMAL_SHAPE_PRIOR_DOGS as SHAPE_PRIOR
+ from configs.SMAL_configs import MEAN_DOG_BONE_LENGTHS_NO_RED, VERTEX_IDS_TAIL
+
+
+ class SmallLinear(nn.Module):
+     def __init__(self, input_size=64, output_size=30, linear_size=128):
+         super(SmallLinear, self).__init__()
+         self.relu = nn.ReLU(inplace=True)
+         self.w1 = nn.Linear(input_size, linear_size)
+         self.w2 = nn.Linear(linear_size, linear_size)
+         self.w3 = nn.Linear(linear_size, output_size)
+     def forward(self, x):
+         # two hidden relu layers followed by a linear output layer
+         y = self.w1(x)
+         y = self.relu(y)
+         y = self.w2(y)
+         y = self.relu(y)
+         y = self.w3(y)
+         return y
+
+
+ class MyConv1d(nn.Module):
+     def __init__(self, input_size=37, output_size=30, start=True):
+         super(MyConv1d, self).__init__()
+         self.input_size = input_size
+         self.output_size = output_size
+         self.start = start
+         self.weight = Parameter(torch.ones((self.output_size)))
+         self.bias = Parameter(torch.zeros((self.output_size)))
+     def forward(self, x):
+         # take the first (or last) output_size channels and scale them elementwise
+         if self.start:
+             y = x[:, :self.output_size]
+         else:
+             y = x[:, -self.output_size:]
+         y = y * self.weight[None, :] + self.bias[None, :]
+         return y
+
+
+ class ModelShapeAndBreed(nn.Module):
+     def __init__(self, n_betas=10, n_betas_limbs=13, n_breeds=121, n_z=512, structure_z_to_betas='default'):
+         super(ModelShapeAndBreed, self).__init__()
+         self.n_betas = n_betas
+         self.n_betas_limbs = n_betas_limbs    # n_betas_logscale
+         self.n_breeds = n_breeds
+         self.structure_z_to_betas = structure_z_to_betas
+         if self.structure_z_to_betas == '1dconv':
+             if not (n_z == self.n_betas + self.n_betas_limbs):
+                 raise ValueError
+         # shape branch
+         self.resnet = models.resnet34(pretrained=False)
+         # replace the first layer (the input is the rgb image plus the segmentation map)
+         n_in = 3 + 1
+         self.resnet.conv1 = nn.Conv2d(n_in, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
+         # replace the last layer
+         self.resnet.fc = nn.Linear(512, n_z)
+         # softmax
+         self.soft_max = torch.nn.Softmax(dim=1)
+         # fc network (and other versions) to connect z with betas
+         p_dropout = 0.2
+         if self.structure_z_to_betas == 'default':
+             self.linear_betas = LinearModel(linear_size=1024,
+                                             num_stage=1,
+                                             p_dropout=p_dropout,
+                                             input_size=n_z,
+                                             output_size=self.n_betas)
+             self.linear_betas_limbs = LinearModel(linear_size=1024,
+                                                   num_stage=1,
+                                                   p_dropout=p_dropout,
+                                                   input_size=n_z,
+                                                   output_size=self.n_betas_limbs)
+         elif self.structure_z_to_betas == 'lin':
+             self.linear_betas = nn.Linear(n_z, self.n_betas)
+             self.linear_betas_limbs = nn.Linear(n_z, self.n_betas_limbs)
+         elif self.structure_z_to_betas == 'fc_0':
+             self.linear_betas = SmallLinear(linear_size=128,
+                                             input_size=n_z,
+                                             output_size=self.n_betas)
+             self.linear_betas_limbs = SmallLinear(linear_size=128,
+                                                   input_size=n_z,
+                                                   output_size=self.n_betas_limbs)
+         elif structure_z_to_betas == 'fc_1':
+             self.linear_betas = LinearModel(linear_size=64,
+                                             num_stage=1,
+                                             p_dropout=0,
+                                             input_size=n_z,
+                                             output_size=self.n_betas)
+             self.linear_betas_limbs = LinearModel(linear_size=64,
+                                                   num_stage=1,
+                                                   p_dropout=0,
+                                                   input_size=n_z,
+                                                   output_size=self.n_betas_limbs)
+         elif self.structure_z_to_betas == '1dconv':
+             self.linear_betas = MyConv1d(n_z, self.n_betas, start=True)
+             self.linear_betas_limbs = MyConv1d(n_z, self.n_betas_limbs, start=False)
+         elif self.structure_z_to_betas == 'inn':
+             self.linear_betas_and_betas_limbs = INNForShape(self.n_betas, self.n_betas_limbs, betas_scale=1.0, betas_limbs_scale=1.0)
+         else:
+             raise ValueError
+         # network to connect the latent shape vector z with the dog breed classification
+         self.linear_breeds = LinearModel(linear_size=1024,
+                                          num_stage=1,
+                                          p_dropout=p_dropout,
+                                          input_size=n_z,
+                                          output_size=self.n_breeds)
+         # shape multiplicator
+         self.shape_multiplicator_np = np.ones(self.n_betas)
+         with open(SHAPE_PRIOR, 'rb') as file:
+             u = pkl._Unpickler(file)
+             u.encoding = 'latin1'
+             res = u.load()
+         # shape predictions are centered around the mean dog of our dog model
+         self.betas_mean_np = res['dog_cluster_mean']
+
+     def forward(self, img, seg_raw=None, seg_prep=None):
+         # img is the network input image
+         # seg_raw is before softmax and subtracting 0.5
+         # seg_prep would be the prepared segmentation
+         if seg_prep is None:
+             seg_prep = self.soft_max(seg_raw)[:, 1:2, :, :] - 0.5
+         input_img_and_seg = torch.cat((img, seg_prep), axis=1)
+         res_output = self.resnet(input_img_and_seg)
+         dog_breed_output = self.linear_breeds(res_output)
+         if self.structure_z_to_betas == 'inn':
+             shape_output_orig, shape_limbs_output_orig = self.linear_betas_and_betas_limbs(res_output)
+         else:
+             shape_output_orig = self.linear_betas(res_output) * 0.1
+             betas_mean = torch.tensor(self.betas_mean_np).float().to(img.device)
+             shape_output = shape_output_orig + betas_mean[None, 0:self.n_betas]
+             shape_limbs_output_orig = self.linear_betas_limbs(res_output)
+             shape_limbs_output = shape_limbs_output_orig * 0.1
+         output_dict = {'z': res_output,
+                        'breeds': dog_breed_output,
+                        'betas': shape_output_orig,
+                        'betas_limbs': shape_limbs_output_orig}
+         return output_dict
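+
+     # Illustrative tensor shapes for one forward pass (batch size bs, assuming
+     # the default constructor arguments and 256x256 inputs):
+     #   img:         (bs, 3, 256, 256), seg_prep: (bs, 1, 256, 256)
+     #   z:           (bs, n_z=512)      latent shape vector from the resnet
+     #   breeds:      (bs, n_breeds=121) classification logits
+     #   betas:       (bs, n_betas=10),  betas_limbs: (bs, n_betas_limbs=13)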
+
+
+ class LearnableShapedirs(nn.Module):
+     def __init__(self, sym_ids_dict, shapedirs_init, n_betas, n_betas_fixed=10):
+         super(LearnableShapedirs, self).__init__()
+         # shapedirs_init = self.smal.shapedirs.detach()
+         self.n_betas = n_betas
+         self.n_betas_fixed = n_betas_fixed
+         self.sym_ids_dict = sym_ids_dict
+         sym_left_ids = self.sym_ids_dict['left']
+         sym_right_ids = self.sym_ids_dict['right']
+         sym_center_ids = self.sym_ids_dict['center']
+         self.n_center = sym_center_ids.shape[0]
+         self.n_left = sym_left_ids.shape[0]
+         self.n_sd = self.n_betas - self.n_betas_fixed    # number of learnable shapedirs
+         # get indices to go from half_shapedirs to shapedirs
+         inds_back = np.zeros((3889))
+         for ind in range(0, sym_center_ids.shape[0]):
+             ind_in_forward = sym_center_ids[ind]
+             inds_back[ind_in_forward] = ind
+         for ind in range(0, sym_left_ids.shape[0]):
+             ind_in_forward = sym_left_ids[ind]
+             inds_back[ind_in_forward] = sym_center_ids.shape[0] + ind
+         for ind in range(0, sym_right_ids.shape[0]):
+             ind_in_forward = sym_right_ids[ind]
+             inds_back[ind_in_forward] = sym_center_ids.shape[0] + sym_left_ids.shape[0] + ind
+         self.register_buffer('inds_back_torch', torch.Tensor(inds_back).long())
+         # self.smal.shapedirs: (51, 11667)
+         # shapedirs: (3889, 3, n_sd)
+         # shapedirs_half: (2012, 3, n_sd)
+         sd = shapedirs_init[:self.n_betas, :].permute((1, 0)).reshape((-1, 3, self.n_betas))
+         self.register_buffer('sd', sd)
+         sd_center = sd[sym_center_ids, :, self.n_betas_fixed:]
+         sd_left = sd[sym_left_ids, :, self.n_betas_fixed:]
+         self.register_parameter('learnable_half_shapedirs_c0', torch.nn.Parameter(sd_center[:, 0, :].detach()))
+         self.register_parameter('learnable_half_shapedirs_c2', torch.nn.Parameter(sd_center[:, 2, :].detach()))
+         self.register_parameter('learnable_half_shapedirs_l0', torch.nn.Parameter(sd_left[:, 0, :].detach()))
+         self.register_parameter('learnable_half_shapedirs_l1', torch.nn.Parameter(sd_left[:, 1, :].detach()))
+         self.register_parameter('learnable_half_shapedirs_l2', torch.nn.Parameter(sd_left[:, 2, :].detach()))
+     def forward(self):
+         device = self.learnable_half_shapedirs_c0.device
+         half_shapedirs_center = torch.stack((self.learnable_half_shapedirs_c0,
+                                              torch.zeros((self.n_center, self.n_sd)).to(device),
+                                              self.learnable_half_shapedirs_c2), axis=1)
+         half_shapedirs_left = torch.stack((self.learnable_half_shapedirs_l0,
+                                            self.learnable_half_shapedirs_l1,
+                                            self.learnable_half_shapedirs_l2), axis=1)
+         half_shapedirs_right = torch.stack((self.learnable_half_shapedirs_l0,
+                                             - self.learnable_half_shapedirs_l1,
+                                             self.learnable_half_shapedirs_l2), axis=1)
+         half_shapedirs_tot = torch.cat((half_shapedirs_center, half_shapedirs_left, half_shapedirs_right))
+         shapedirs = torch.index_select(half_shapedirs_tot, dim=0, index=self.inds_back_torch)
+         shapedirs_complete = torch.cat((self.sd[:, :, :self.n_betas_fixed], shapedirs), axis=2)    # (3889, 3, n_sd)
+         shapedirs_complete_prepared = torch.cat((self.sd[:, :, :10], shapedirs), axis=2).reshape((-1, 30)).permute((1, 0))    # (n_sd, 11667)
+         return shapedirs_complete, shapedirs_complete_prepared
+
216
+
217
+
218
+
219
+
+class ModelImageToBreed(nn.Module):
+    def __init__(self, arch='hg8', n_joints=35, n_classes=20, n_partseg=15, n_keyp=20, n_bones=24, n_betas=10, n_betas_limbs=7, n_breeds=121, image_size=256, n_z=512, thr_keyp_sc=None, add_partseg=True):
+        super(ModelImageToBreed, self).__init__()
+        self.n_classes = n_classes
+        self.n_partseg = n_partseg
+        self.n_betas = n_betas
+        self.n_betas_limbs = n_betas_limbs
+        self.n_keyp = n_keyp
+        self.n_bones = n_bones
+        self.n_breeds = n_breeds
+        self.image_size = image_size
+        self.upsample_seg = True
+        self.threshold_scores = thr_keyp_sc
+        self.n_z = n_z
+        self.add_partseg = add_partseg
+        # ------------------------------ STACKED HOUR GLASS ------------------------------
+        if arch == 'hg8':
+            self.stacked_hourglass = hg8(pretrained=False, num_classes=self.n_classes, num_partseg=self.n_partseg, upsample_seg=self.upsample_seg, add_partseg=self.add_partseg)
+        else:
+            raise Exception('unrecognised model architecture: ' + arch)
+        # ------------------------------ SHAPE AND BREED MODEL ------------------------------
+        self.breed_model = ModelShapeAndBreed(n_betas=self.n_betas, n_betas_limbs=self.n_betas_limbs, n_breeds=self.n_breeds, n_z=self.n_z)
+
+    def forward(self, input_img, norm_dict=None, bone_lengths_prepared=None, betas=None):
+        batch_size = input_img.shape[0]
+        device = input_img.device
+        # ------------------------------ STACKED HOUR GLASS ------------------------------
+        hourglass_out_dict = self.stacked_hourglass(input_img)
+        last_seg = hourglass_out_dict['seg_final']
+        last_heatmap = hourglass_out_dict['out_list_kp'][-1]
+        # - prepare keypoints (from heatmap)
+        #   normalize predictions -> from logits to probability distribution
+        #   last_heatmap_norm = dsnt.spatial_softmax2d(last_heatmap, temperature=torch.tensor(1))
+        #   keypoints = dsnt.spatial_expectation2d(last_heatmap_norm, normalized_coordinates=False) + 1    # (bs, 20, 2)
+        #   keypoints_norm = dsnt.spatial_expectation2d(last_heatmap_norm, normalized_coordinates=True)    # (bs, 20, 2)
+        keypoints_norm, scores = get_preds_soft(last_heatmap, return_maxval=True, norm_coords=True)
+        if self.threshold_scores is not None:
+            scores[scores>self.threshold_scores] = 1.0
+            scores[scores<=self.threshold_scores] = 0.0
+        # ------------------------------ SHAPE AND BREED MODEL ------------------------------
+        # breed_model takes as input the image as well as the predicted segmentation map
+        #   -> we need to split up ModelImageTo3d, such that we can use the silhouette
+        resnet_output = self.breed_model(img=input_img, seg_raw=last_seg)
+        pred_breed = resnet_output['breeds']    # (bs, n_breeds)
+        pred_betas = resnet_output['betas']
+        pred_betas_limbs = resnet_output['betas_limbs']
+        small_output = {'keypoints_norm': keypoints_norm,
+                        'keypoints_scores': scores}
+        small_output_reproj = {'betas': pred_betas,
+                               'betas_limbs': pred_betas_limbs,
+                               'dog_breed': pred_breed}
+        return small_output, None, small_output_reproj
+
+
+class ModelImageTo3d_withshape_withproj(nn.Module):
+    def __init__(self, arch='hg8', num_stage_comb=2, num_stage_heads=1, num_stage_heads_pose=1, trans_sep=False, n_joints=35, n_classes=20, n_partseg=15, n_keyp=20, n_bones=24, n_betas=10, n_betas_limbs=6, n_breeds=121, image_size=256, n_z=512, n_segbps=64*2, thr_keyp_sc=None, add_z_to_3d_input=True, add_segbps_to_3d_input=False, add_partseg=True, silh_no_tail=True, fix_flength=False, render_partseg=False, structure_z_to_betas='default', structure_pose_net='default', nf_version=None):
+        super(ModelImageTo3d_withshape_withproj, self).__init__()
+        self.n_classes = n_classes
+        self.n_partseg = n_partseg
+        self.n_betas = n_betas
+        self.n_betas_limbs = n_betas_limbs
+        self.n_keyp = n_keyp
+        self.n_bones = n_bones
+        self.n_breeds = n_breeds
+        self.image_size = image_size
+        self.threshold_scores = thr_keyp_sc
+        self.upsample_seg = True
+        self.silh_no_tail = silh_no_tail
+        self.add_z_to_3d_input = add_z_to_3d_input
+        self.add_segbps_to_3d_input = add_segbps_to_3d_input
+        self.add_partseg = add_partseg
+        assert (not self.add_segbps_to_3d_input) or (not self.add_z_to_3d_input)
+        self.n_z = n_z
+        if add_segbps_to_3d_input:
+            self.n_segbps = n_segbps    # 64
+            self.segbps_model = SegBPS()
+        else:
+            self.n_segbps = 0
+        self.fix_flength = fix_flength
+        self.render_partseg = render_partseg
+        self.structure_z_to_betas = structure_z_to_betas
+        self.structure_pose_net = structure_pose_net
+        assert self.structure_pose_net in ['default', 'vae', 'normflow']
+        self.nf_version = nf_version
+        self.register_buffer('betas_zeros', torch.zeros((1, self.n_betas)))
+        self.register_buffer('mean_dog_bone_lengths', torch.tensor(MEAN_DOG_BONE_LENGTHS_NO_RED, dtype=torch.float32))
+        p_dropout = 0.2    # 0.5
+        # ------------------------------ SMAL MODEL ------------------------------
+        self.smal = SMAL(template_name='neutral')
+        # New for rendering without tail
+        f_np = self.smal.faces.detach().cpu().numpy()
+        self.f_no_tail_np = f_np[np.isin(f_np[:,:], VERTEX_IDS_TAIL).sum(axis=1)==0, :]
+        # in theory we could optimize for improved shapedirs, but we do not do that
+        #   -> would need to implement regularizations
+        #   -> there are better ways than changing the shapedirs
+        self.model_learnable_shapedirs = LearnableShapedirs(self.smal.sym_ids_dict, self.smal.shapedirs.detach(), self.n_betas, 10)
+        # ------------------------------ STACKED HOUR GLASS ------------------------------
+        if arch == 'hg8':
+            self.stacked_hourglass = hg8(pretrained=False, num_classes=self.n_classes, num_partseg=self.n_partseg, upsample_seg=self.upsample_seg, add_partseg=self.add_partseg)
+        else:
+            raise Exception('unrecognised model architecture: ' + arch)
+        # ------------------------------ SHAPE AND BREED MODEL ------------------------------
+        self.breed_model = ModelShapeAndBreed(n_betas=self.n_betas, n_betas_limbs=self.n_betas_limbs, n_breeds=self.n_breeds, n_z=self.n_z, structure_z_to_betas=self.structure_z_to_betas)
+        # ------------------------------ LINEAR 3D MODEL ------------------------------
+        # 3d model -> from image to 3d parameters {2d keypoints from heatmap, pose, trans, flength}
+        self.soft_max = torch.nn.Softmax(dim=1)
+        input_size = self.n_keyp*3 + self.n_bones
+        self.model_3d = LinearModelComplete(linear_size=1024,
+                                            num_stage_comb=num_stage_comb,
+                                            num_stage_heads=num_stage_heads,
+                                            num_stage_heads_pose=num_stage_heads_pose,
+                                            trans_sep=trans_sep,
+                                            p_dropout=p_dropout,    # 0.5,
+                                            input_size=input_size,
+                                            intermediate_size=1024,
+                                            output_info=None,
+                                            n_joints=n_joints,
+                                            n_z=self.n_z,
+                                            add_z_to_3d_input=self.add_z_to_3d_input,
+                                            n_segbps=self.n_segbps,
+                                            add_segbps_to_3d_input=self.add_segbps_to_3d_input,
+                                            structure_pose_net=self.structure_pose_net,
+                                            nf_version=self.nf_version)
+        # ------------------------------ RENDERING ------------------------------
+        self.silh_renderer = SilhRenderer(image_size)
+
+    def forward(self, input_img, norm_dict=None, bone_lengths_prepared=None, betas=None):
+        batch_size = input_img.shape[0]
+        device = input_img.device
+        # ------------------------------ STACKED HOUR GLASS ------------------------------
+        hourglass_out_dict = self.stacked_hourglass(input_img)
+        last_seg = hourglass_out_dict['seg_final']
+        last_heatmap = hourglass_out_dict['out_list_kp'][-1]
+        # - prepare keypoints (from heatmap)
+        #   normalize predictions -> from logits to probability distribution
+        #   last_heatmap_norm = dsnt.spatial_softmax2d(last_heatmap, temperature=torch.tensor(1))
+        #   keypoints = dsnt.spatial_expectation2d(last_heatmap_norm, normalized_coordinates=False) + 1    # (bs, 20, 2)
+        #   keypoints_norm = dsnt.spatial_expectation2d(last_heatmap_norm, normalized_coordinates=True)    # (bs, 20, 2)
+        keypoints_norm, scores = get_preds_soft(last_heatmap, return_maxval=True, norm_coords=True)
+        if self.threshold_scores is not None:
+            scores[scores>self.threshold_scores] = 1.0
+            scores[scores<=self.threshold_scores] = 0.0
+        # ------------------------------ LEARNABLE SHAPE MODEL ------------------------------
+        # in our cvpr 2022 paper we do not change the shapedirs
+        #   learnable_sd_complete has shape (3889, 3, n_sd)
+        #   learnable_sd_complete_prepared has shape (n_sd, 11667)
+        learnable_sd_complete, learnable_sd_complete_prepared = self.model_learnable_shapedirs()
+        shapedirs_sel = learnable_sd_complete_prepared    # None
+        # ------------------------------ SHAPE AND BREED MODEL ------------------------------
+        # breed_model takes as input the image as well as the predicted segmentation map
+        #   -> we need to split up ModelImageTo3d, such that we can use the silhouette
+        resnet_output = self.breed_model(img=input_img, seg_raw=last_seg)
+        pred_breed = resnet_output['breeds']    # (bs, n_breeds)
+        pred_z = resnet_output['z']
+        # - prepare shape
+        pred_betas = resnet_output['betas']
+        pred_betas_limbs = resnet_output['betas_limbs']
+        # - calculate bone lengths
+        with torch.no_grad():
+            use_mean_bone_lengths = False
+            if use_mean_bone_lengths:
+                bone_lengths_prepared = torch.cat(batch_size*[self.mean_dog_bone_lengths.reshape((1, -1))])
+            else:
+                assert (bone_lengths_prepared is None)
+                bone_lengths_prepared = self.smal.caclulate_bone_lengths(pred_betas, pred_betas_limbs, shapedirs_sel=shapedirs_sel, short=True)
+        # ------------------------------ LINEAR 3D MODEL ------------------------------
+        # 3d model -> from image to 3d parameters {2d keypoints from heatmap, pose, trans, flength}
+        # prepare input for 2d-to-3d network
+        keypoints_prepared = torch.cat((keypoints_norm, scores), axis=2)
+        if bone_lengths_prepared is None:
+            bone_lengths_prepared = torch.cat(batch_size*[self.mean_dog_bone_lengths.reshape((1, -1))])
+        # should we add silhouette to 3d input? should we add z?
+        if self.add_segbps_to_3d_input:
+            seg_raw = last_seg
+            seg_prep_bps = self.soft_max(seg_raw)[:, 1, :, :]    # class 1 is the dog
+            with torch.no_grad():
+                seg_prep_np = seg_prep_bps.detach().cpu().numpy()
+                bps_output_np = self.segbps_model.calculate_bps_points_batch(seg_prep_np)    # (bs, 64, 2)
+                bps_output = torch.tensor(bps_output_np, dtype=torch.float32).to(device).reshape((batch_size, -1))
+                bps_output_prep = bps_output * 2. - 1
+            input_vec_keyp_bones = torch.cat((keypoints_prepared.reshape((batch_size, -1)), bone_lengths_prepared), axis=1)
+            input_vec = torch.cat((input_vec_keyp_bones, bps_output_prep), dim=1)
+        elif self.add_z_to_3d_input:
+            # we do not use this in our cvpr 2022 version
+            input_vec_keyp_bones = torch.cat((keypoints_prepared.reshape((batch_size, -1)), bone_lengths_prepared), axis=1)
+            input_vec_additional = pred_z
+            input_vec = torch.cat((input_vec_keyp_bones, input_vec_additional), dim=1)
+        else:
+            input_vec = torch.cat((keypoints_prepared.reshape((batch_size, -1)), bone_lengths_prepared), axis=1)
+        # predict 3d parameters (those are normalized, we need to correct mean and std in a next step)
+        output = self.model_3d(input_vec)
+        # add predicted keypoints to the output dict
+        output['keypoints_norm'] = keypoints_norm
+        output['keypoints_scores'] = scores
+        # - denormalize 3d parameters -> so far predictions were normalized, now we denormalize them again
+        pred_trans = output['trans'] * norm_dict['trans_std'][None, :] + norm_dict['trans_mean'][None, :]    # (bs, 3)
+        if self.structure_pose_net == 'default':
+            pred_pose_rot6d = output['pose'] + norm_dict['pose_rot6d_mean'][None, :]
+        elif self.structure_pose_net == 'normflow':
+            pose_rot6d_mean_zeros = torch.zeros_like(norm_dict['pose_rot6d_mean'][None, :])
+            pose_rot6d_mean_zeros[:, 0, :] = norm_dict['pose_rot6d_mean'][None, 0, :]
+            pred_pose_rot6d = output['pose'] + pose_rot6d_mean_zeros
+        else:    # 'vae': add the mean only for the root rotation, as for 'normflow'
+            pose_rot6d_mean_zeros = torch.zeros_like(norm_dict['pose_rot6d_mean'][None, :])
+            pose_rot6d_mean_zeros[:, 0, :] = norm_dict['pose_rot6d_mean'][None, 0, :]
+            pred_pose_rot6d = output['pose'] + pose_rot6d_mean_zeros
+        pred_pose_reshx33 = rot6d_to_rotmat(pred_pose_rot6d.reshape((-1, 6)))
+        pred_pose = pred_pose_reshx33.reshape((batch_size, -1, 3, 3))
+        pred_pose_rot6d = rotmat_to_rot6d(pred_pose_reshx33).reshape((batch_size, -1, 6))
+
+        if self.fix_flength:
+            output['flength'] = torch.zeros_like(output['flength'])
+            pred_flength = torch.ones_like(output['flength'])*2100    # norm_dict['flength_mean'][None, :]
+        else:
+            pred_flength_orig = output['flength'] * norm_dict['flength_std'][None, :] + norm_dict['flength_mean'][None, :]    # (bs, 1)
+            pred_flength = pred_flength_orig.clone()    # torch.abs(pred_flength_orig)
+            pred_flength[pred_flength_orig<=0] = norm_dict['flength_mean'][None, :]
+
+        # ------------------------------ RENDERING ------------------------------
+        # get 3d model (SMAL)
+        V, keyp_green_3d, _ = self.smal(beta=pred_betas, betas_limbs=pred_betas_limbs, pose=pred_pose, trans=pred_trans, get_skin=True, keyp_conf='green', shapedirs_sel=shapedirs_sel)
+        keyp_3d = keyp_green_3d[:, :self.n_keyp, :]    # (bs, 20, 3)
+        # render silhouette
+        faces_prep = self.smal.faces.unsqueeze(0).expand((batch_size, -1, -1))
+        if not self.silh_no_tail:
+            pred_silh_images, pred_keyp = self.silh_renderer(vertices=V,
+                points=keyp_3d, faces=faces_prep, focal_lengths=pred_flength)
+        else:
+            faces_no_tail_prep = torch.tensor(self.f_no_tail_np).to(device).expand((batch_size, -1, -1))
+            pred_silh_images, pred_keyp = self.silh_renderer(vertices=V,
+                points=keyp_3d, faces=faces_no_tail_prep, focal_lengths=pred_flength)
+        # get torch 'Meshes'
+        torch_meshes = self.silh_renderer.get_torch_meshes(vertices=V, faces=faces_prep)
+
+        # render body parts (not part of cvpr 2022 version)
+        if self.render_partseg:
+            raise NotImplementedError
+        else:
+            partseg_images = None
+            partseg_images_hg = None
+
+        # ------------------------------ PREPARE OUTPUT ------------------------------
+        # create output dictionaries
+        #   output: contains all output from model_image_to_3d
+        #   output_unnorm: same as output, but normalizations are undone
+        #   output_reproj: smal output and reprojected keypoints as well as silhouette
+        keypoints_heatmap_256 = (output['keypoints_norm'] / 2. + 0.5) * (self.image_size - 1)
+        output_unnorm = {'pose_rotmat': pred_pose,
+                         'flength': pred_flength,
+                         'trans': pred_trans,
+                         'keypoints': keypoints_heatmap_256}
+        output_reproj = {'vertices_smal': V,
+                         'torch_meshes': torch_meshes,
+                         'keyp_3d': keyp_3d,
+                         'keyp_2d': pred_keyp,
+                         'silh': pred_silh_images,
+                         'betas': pred_betas,
+                         'betas_limbs': pred_betas_limbs,
+                         'pose_rot6d': pred_pose_rot6d,    # used for pose prior...
+                         'dog_breed': pred_breed,
+                         'shapedirs': shapedirs_sel,
+                         'z': pred_z,
+                         'flength_unnorm': pred_flength,
+                         'flength': output['flength'],
+                         'partseg_images_rend': partseg_images,
+                         'partseg_images_hg_nograd': partseg_images_hg,
+                         'normflow_z': output['normflow_z']}
+
+        return output, output_unnorm, output_reproj
+
+    def render_vis_nograd(self, vertices, focal_lengths, color=0):
+        # this function is for visualization only
+        #   vertices: (bs, n_verts, 3)
+        #   focal_lengths: (bs, 1)
+        #   color: integer, either 0 or 1
+        # returns a torch tensor of shape (bs, image_size, image_size, 3)
+        with torch.no_grad():
+            batch_size = vertices.shape[0]
+            faces_prep = self.smal.faces.unsqueeze(0).expand((batch_size, -1, -1))
+            visualizations = self.silh_renderer.get_visualization_nograd(vertices,
+                faces_prep, focal_lengths, color=color)
+        return visualizations
+
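+# Editor's note -- an illustrative usage sketch, not part of the original code.
+# It assumes the SMAL data files are on disk and that the constructor arguments
+# match the training configuration; the statistics below are placeholder values.
+#
+#     model = ModelImageTo3d_withshape_withproj(arch='hg8', n_keyp=24, n_betas=30)
+#     model.eval()
+#     img = torch.randn(2, 3, 256, 256)    # batch of normalized RGB crops
+#     norm_dict = {'pose_rot6d_mean': torch.zeros(35, 6),
+#                  'trans_mean': torch.zeros(3), 'trans_std': torch.ones(3),
+#                  'flength_mean': torch.full((1,), 2100.), 'flength_std': torch.ones(1)}
+#     with torch.no_grad():
+#         output, output_unnorm, output_reproj = model(img, norm_dict=norm_dict)
+#     # output_reproj['vertices_smal'] holds the posed SMAL vertices,
+#     # output_reproj['silh'] the rendered silhouettes.
+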
src/combined_model/train_main_image_to_3d_withbreedrel.py ADDED
@@ -0,0 +1,470 @@
+
+import torch
+import torch.nn as nn
+import torch.backends.cudnn
+import torch.nn.parallel
+from tqdm import tqdm
+import os
+import pathlib
+from matplotlib import pyplot as plt
+import cv2
+import numpy as np
+import trimesh
+
+import sys
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
+from stacked_hourglass.utils.evaluation import accuracy, AverageMeter, final_preds, get_preds, get_preds_soft
+from stacked_hourglass.utils.visualization import save_input_image_with_keypoints, save_input_image
+from metrics.metrics import Metrics
+from configs.SMAL_configs import EVAL_KEYPOINTS, KEYPOINT_GROUPS
+
+
+# ---------------------------------------------------------------------------------------------------------------------------
+def do_training_epoch(train_loader, model, loss_module, device, data_info, optimiser, quiet=False, acc_joints=None, weight_dict=None):
+    losses = AverageMeter()
+    losses_keyp = AverageMeter()
+    losses_silh = AverageMeter()
+    losses_shape = AverageMeter()
+    losses_pose = AverageMeter()
+    losses_class = AverageMeter()
+    losses_breed = AverageMeter()
+    losses_partseg = AverageMeter()
+    accuracies = AverageMeter()
+    my_string = ''    # keep the return value below defined even when quiet=True
+    # Put the model in training mode.
+    model.train()
+    # prepare progress bar
+    iterable = enumerate(train_loader)
+    progress = None
+    if not quiet:
+        progress = tqdm(iterable, desc='Train', total=len(train_loader), ascii=True, leave=False)
+        iterable = progress
+    # information for normalization
+    norm_dict = {
+        'pose_rot6d_mean': torch.from_numpy(data_info.pose_rot6d_mean).float().to(device),
+        'trans_mean': torch.from_numpy(data_info.trans_mean).float().to(device),
+        'trans_std': torch.from_numpy(data_info.trans_std).float().to(device),
+        'flength_mean': torch.from_numpy(data_info.flength_mean).float().to(device),
+        'flength_std': torch.from_numpy(data_info.flength_std).float().to(device)}
+    # prepare variables, put them on the right device
+    for i, (input, target_dict) in iterable:
+        batch_size = input.shape[0]
+        for key in target_dict.keys():
+            if key == 'breed_index':
+                target_dict[key] = target_dict[key].long().to(device)
+            elif key in ['index', 'pts', 'tpts', 'target_weight', 'silh', 'silh_distmat_tofg', 'silh_distmat_tobg', 'sim_breed_index', 'img_border_mask']:
+                target_dict[key] = target_dict[key].float().to(device)
+            elif key == 'has_seg':
+                target_dict[key] = target_dict[key].to(device)
+            else:
+                pass
+        input = input.float().to(device)
+
+        # ----------------------- do training step -----------------------
+        assert model.training, 'model must be in training mode.'
+        with torch.enable_grad():
+            # ----- forward pass -----
+            output, output_unnorm, output_reproj = model(input, norm_dict=norm_dict)
+            # ----- loss -----
+            loss, loss_dict = loss_module(output_reproj=output_reproj,
+                                          target_dict=target_dict,
+                                          weight_dict=weight_dict)
+            # ----- backward pass and parameter update -----
+            optimiser.zero_grad()
+            loss.backward()
+            optimiser.step()
+        # ----------------------------------------------------------------
+
+        # prepare losses for progress bar
+        bs_fake = 1    # batch_size
+        losses.update(loss_dict['loss'], bs_fake)
+        losses_keyp.update(loss_dict['loss_keyp_weighted'], bs_fake)
+        losses_silh.update(loss_dict['loss_silh_weighted'], bs_fake)
+        losses_shape.update(loss_dict['loss_shape_weighted'], bs_fake)
+        losses_pose.update(loss_dict['loss_poseprior_weighted'], bs_fake)
+        losses_class.update(loss_dict['loss_class_weighted'], bs_fake)
+        losses_breed.update(loss_dict['loss_breed_weighted'], bs_fake)
+        losses_partseg.update(loss_dict['loss_partseg_weighted'], bs_fake)
+        acc = - loss_dict['loss_keyp_weighted']    # this will be used to keep track of the 'best model'
+        accuracies.update(acc, bs_fake)
+        # Show losses as part of the progress bar.
+        if progress is not None:
+            my_string = 'Loss: {loss:0.4f}, loss_keyp: {loss_keyp:0.4f}, loss_silh: {loss_silh:0.4f}, loss_partseg: {loss_partseg:0.4f}, loss_shape: {loss_shape:0.4f}, loss_pose: {loss_pose:0.4f}, loss_class: {loss_class:0.4f}, loss_breed: {loss_breed:0.4f}'.format(
+                loss=losses.avg,
+                loss_keyp=losses_keyp.avg,
+                loss_silh=losses_silh.avg,
+                loss_shape=losses_shape.avg,
+                loss_pose=losses_pose.avg,
+                loss_class=losses_class.avg,
+                loss_breed=losses_breed.avg,
+                loss_partseg=losses_partseg.avg
+            )
+            progress.set_postfix_str(my_string)
+
+    return my_string, accuracies.avg
+
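+# Editor's note -- an illustrative invocation sketch, not part of the original
+# file. train_loader, loss_module, data_info and weight_dict are assumed to be
+# provided by the surrounding training script (see src/configs for the weights).
+#
+#     optimiser = torch.optim.Adam(model.parameters(), lr=5e-4)
+#     for epoch in range(num_epochs):
+#         train_string, train_acc = do_training_epoch(
+#             train_loader, model, loss_module, 'cuda', data_info, optimiser,
+#             weight_dict=weight_dict)
+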
+# ---------------------------------------------------------------------------------------------------------------------------
+def do_validation_epoch(val_loader, model, loss_module, device, data_info, flip=False, quiet=False, acc_joints=None, save_imgs_path=None, weight_dict=None, metrics=None, val_opt='default', test_name_list=None, render_all=False, pck_thresh=0.15, len_dataset=None):
+    losses = AverageMeter()
+    losses_keyp = AverageMeter()
+    losses_silh = AverageMeter()
+    losses_shape = AverageMeter()
+    losses_pose = AverageMeter()
+    losses_class = AverageMeter()
+    losses_breed = AverageMeter()
+    losses_partseg = AverageMeter()
+    accuracies = AverageMeter()
+    my_string = ''    # keep the return values below defined even when quiet=True
+    if save_imgs_path is not None:
+        pathlib.Path(save_imgs_path).mkdir(parents=True, exist_ok=True)
+    # Put the model in evaluation mode.
+    model.eval()
+    # prepare progress bar
+    iterable = enumerate(val_loader)
+    progress = None
+    if not quiet:
+        progress = tqdm(iterable, desc='Valid', total=len(val_loader), ascii=True, leave=False)
+        iterable = progress
+    # summarize information for normalization
+    norm_dict = {
+        'pose_rot6d_mean': torch.from_numpy(data_info.pose_rot6d_mean).float().to(device),
+        'trans_mean': torch.from_numpy(data_info.trans_mean).float().to(device),
+        'trans_std': torch.from_numpy(data_info.trans_std).float().to(device),
+        'flength_mean': torch.from_numpy(data_info.flength_mean).float().to(device),
+        'flength_std': torch.from_numpy(data_info.flength_std).float().to(device)}
+    batch_size = val_loader.batch_size
+    # prepare variables, put them on the right device
+    my_step = 0
+    for i, (input, target_dict) in iterable:
+        curr_batch_size = input.shape[0]
+        for key in target_dict.keys():
+            if key == 'breed_index':
+                target_dict[key] = target_dict[key].long().to(device)
+            elif key in ['index', 'pts', 'tpts', 'target_weight', 'silh', 'silh_distmat_tofg', 'silh_distmat_tobg', 'sim_breed_index', 'img_border_mask']:
+                target_dict[key] = target_dict[key].float().to(device)
+            elif key == 'has_seg':
+                target_dict[key] = target_dict[key].to(device)
+            else:
+                pass
+        input = input.float().to(device)
+
+        # ----------------------- do validation step -----------------------
+        with torch.no_grad():
+            # ----- forward pass -----
+            # output: (['pose', 'flength', 'trans', 'keypoints_norm', 'keypoints_scores'])
+            # output_unnorm: (['pose_rotmat', 'flength', 'trans', 'keypoints'])
+            # output_reproj: (['vertices_smal', 'torch_meshes', 'keyp_3d', 'keyp_2d', 'silh', 'betas', 'pose_rot6d', 'dog_breed', 'shapedirs', 'z', 'flength_unnorm', 'flength'])
+            # target_dict: (['index', 'center', 'scale', 'pts', 'tpts', 'target_weight', 'breed_index', 'sim_breed_index', 'ind_dataset', 'silh'])
+            output, output_unnorm, output_reproj = model(input, norm_dict=norm_dict)
+            # ----- loss -----
+            if metrics == 'no_loss':
+                loss, loss_dict = loss_module(output_reproj=output_reproj,
+                                              target_dict=target_dict,
+                                              weight_dict=weight_dict)
+        # ----------------------------------------------------------------
+
+        if i == 0:
+            if len_dataset is None:
+                len_data = val_loader.batch_size * len(val_loader)    # 1703
+            else:
+                len_data = len_dataset
+            if metrics == 'all' or metrics == 'no_loss':
+                pck = np.zeros((len_data))
+                pck_by_part = {group: np.zeros((len_data)) for group in KEYPOINT_GROUPS}
+                acc_sil_2d = np.zeros(len_data)
+
+                all_betas = np.zeros((len_data, output_reproj['betas'].shape[1]))
+                all_betas_limbs = np.zeros((len_data, output_reproj['betas_limbs'].shape[1]))
+                all_z = np.zeros((len_data, output_reproj['z'].shape[1]))
+                all_pose_rotmat = np.zeros((len_data, output_unnorm['pose_rotmat'].shape[1], 3, 3))
+                all_flength = np.zeros((len_data, output_unnorm['flength'].shape[1]))
+                all_trans = np.zeros((len_data, output_unnorm['trans'].shape[1]))
+                all_breed_indices = np.zeros((len_data))
+                all_image_names = []    # len_data * [None]
+
+        index = i
+        ind_img = 0
+        if save_imgs_path is not None:
+            # render predicted 3d models
+            visualizations = model.render_vis_nograd(vertices=output_reproj['vertices_smal'],
+                                                     focal_lengths=output_unnorm['flength'],
+                                                     color=0)    # color=2)
+            for ind_img in range(len(target_dict['index'])):
+                try:
+                    if test_name_list is not None:
+                        img_name = test_name_list[int(target_dict['index'][ind_img].cpu().detach().numpy())].replace('/', '_')
+                        img_name = img_name.split('.')[0]
+                    else:
+                        img_name = str(index) + '_' + str(ind_img)
+                    # save image with predicted keypoints
+                    out_path = save_imgs_path + '/keypoints_pred_' + img_name + '.png'
+                    pred_unp = (output['keypoints_norm'][ind_img, :, :] + 1.) / 2 * (data_info.image_size - 1)
+                    pred_unp_maxval = output['keypoints_scores'][ind_img, :, :]
+                    pred_unp_prep = torch.cat((pred_unp, pred_unp_maxval), 1)
+                    inp_img = input[ind_img, :, :, :].detach().clone()
+                    save_input_image_with_keypoints(inp_img, pred_unp_prep, out_path=out_path, threshold=0.1, print_scores=True, ratio_in_out=1.0)    # threshold=0.3
+                    # save predicted 3d model (front view)
+                    pred_tex = visualizations[ind_img, :, :, :].permute((1, 2, 0)).cpu().detach().numpy() / 256
+                    pred_tex_max = np.max(pred_tex, axis=2)
+                    out_path = save_imgs_path + '/tex_pred_' + img_name + '.png'
+                    plt.imsave(out_path, pred_tex)
+                    input_image = input[ind_img, :, :, :].detach().clone()
+                    for t, m, s in zip(input_image, data_info.rgb_mean, data_info.rgb_stddev): t.add_(m)
+                    input_image_np = input_image.detach().cpu().numpy().transpose(1, 2, 0)
+                    im_masked = cv2.addWeighted(input_image_np, 0.2, pred_tex, 0.8, 0)
+                    im_masked[pred_tex_max<0.01, :] = input_image_np[pred_tex_max<0.01, :]
+                    out_path = save_imgs_path + '/comp_pred_' + img_name + '.png'
+                    plt.imsave(out_path, im_masked)
+                    # save predicted 3d model (side view)
+                    vertices_cent = output_reproj['vertices_smal'] - output_reproj['vertices_smal'].mean(dim=1)[:, None, :]
+                    roll = np.pi / 2 * torch.ones(1).float().to(device)
+                    pitch = np.pi / 2 * torch.ones(1).float().to(device)
+                    tensor_0 = torch.zeros(1).float().to(device)
+                    tensor_1 = torch.ones(1).float().to(device)
+                    RX = torch.stack([torch.stack([tensor_1, tensor_0, tensor_0]), torch.stack([tensor_0, torch.cos(roll), -torch.sin(roll)]), torch.stack([tensor_0, torch.sin(roll), torch.cos(roll)])]).reshape(3, 3)    # rotation around x (currently unused, only RY is applied below)
+                    RY = torch.stack([
+                        torch.stack([torch.cos(pitch), tensor_0, torch.sin(pitch)]),
+                        torch.stack([tensor_0, tensor_1, tensor_0]),
+                        torch.stack([-torch.sin(pitch), tensor_0, torch.cos(pitch)])]).reshape(3, 3)
+                    vertices_rot = (torch.matmul(RY, vertices_cent.reshape((-1, 3))[:, :, None])).reshape((curr_batch_size, -1, 3))
+                    vertices_rot[:, :, 2] = vertices_rot[:, :, 2] + torch.ones_like(vertices_rot[:, :, 2]) * 20    # 18 # *16
+
+                    visualizations_rot = model.render_vis_nograd(vertices=vertices_rot,
+                                                                 focal_lengths=output_unnorm['flength'],
+                                                                 color=0)    # 2)
+                    pred_tex = visualizations_rot[ind_img, :, :, :].permute((1, 2, 0)).cpu().detach().numpy() / 256
+                    pred_tex_max = np.max(pred_tex, axis=2)
+                    out_path = save_imgs_path + '/rot_tex_pred_' + img_name + '.png'
+                    plt.imsave(out_path, pred_tex)
+                    if render_all:
+                        # save input image
+                        inp_img = input[ind_img, :, :, :].detach().clone()
+                        out_path = save_imgs_path + '/image_' + img_name + '.png'
+                        save_input_image(inp_img, out_path)
+                        # save mesh
+                        V_posed = output_reproj['vertices_smal'][ind_img, :, :].detach().cpu().numpy()
+                        Faces = model.smal.f
+                        mesh_posed = trimesh.Trimesh(vertices=V_posed, faces=Faces, process=False)
+                        mesh_posed.export(save_imgs_path + '/mesh_posed_' + img_name + '.obj')
+                except Exception as e:
+                    print('could not save the images for this sample: ' + str(e))
+
+        if metrics == 'all' or metrics == 'no_loss':
+            # prepare a dictionary with all the predicted results
+            preds = {}
+            preds['betas'] = output_reproj['betas'].cpu().detach().numpy()
+            preds['betas_limbs'] = output_reproj['betas_limbs'].cpu().detach().numpy()
+            preds['z'] = output_reproj['z'].cpu().detach().numpy()
+            preds['pose_rotmat'] = output_unnorm['pose_rotmat'].cpu().detach().numpy()
+            preds['flength'] = output_unnorm['flength'].cpu().detach().numpy()
+            preds['trans'] = output_unnorm['trans'].cpu().detach().numpy()
+            preds['breed_index'] = target_dict['breed_index'].cpu().detach().numpy().reshape((-1))
+            img_names = []
+            for ind_img2 in range(0, output_reproj['betas'].shape[0]):
+                if test_name_list is not None:
+                    img_name2 = test_name_list[int(target_dict['index'][ind_img2].cpu().detach().numpy())].replace('/', '_')
+                    img_name2 = img_name2.split('.')[0]
+                else:
+                    img_name2 = str(index) + '_' + str(ind_img2)
+                img_names.append(img_name2)
+            preds['image_names'] = img_names
+            # prepare keypoints for PCK calculation - predicted as well as ground truth
+            pred_keypoints_norm = output['keypoints_norm']    # -1 to 1
+            pred_keypoints_256 = output_reproj['keyp_2d']
+            pred_keypoints = pred_keypoints_256
+            gt_keypoints_256 = target_dict['tpts'][:, :, :2] / 64. * (256. - 1)
+            gt_keypoints_norm = gt_keypoints_256 / 256 / 0.5 - 1
+            gt_keypoints = torch.cat((gt_keypoints_256, target_dict['tpts'][:, :, 2:3]), dim=2)    # gt_keypoints_norm
+            # prepare silhouette for IoU calculation - predicted as well as ground truth
+            has_seg = target_dict['has_seg']
+            img_border_mask = target_dict['img_border_mask'][:, 0, :, :]
+            gtseg = target_dict['silh']
+            synth_silhouettes = output_reproj['silh'][:, 0, :, :]    # output_reproj['silh']
+            synth_silhouettes[synth_silhouettes>0.5] = 1
+            synth_silhouettes[synth_silhouettes<0.5] = 0
+            # calculate PCK as well as IoU (similar to WLDO)
+            preds['acc_PCK'] = Metrics.PCK(
+                pred_keypoints, gt_keypoints,
+                gtseg, has_seg, idxs=EVAL_KEYPOINTS,
+                thresh_range=[pck_thresh],    # [0.15],
+            )
+            preds['acc_IOU'] = Metrics.IOU(
+                synth_silhouettes, gtseg,
+                img_border_mask, mask=has_seg
+            )
+            for group, group_kps in KEYPOINT_GROUPS.items():
+                preds[f'{group}_PCK'] = Metrics.PCK(
+                    pred_keypoints, gt_keypoints, gtseg, has_seg,
+                    thresh_range=[pck_thresh],    # [0.15],
+                    idxs=group_kps
+                )
+            # add results for all images in this batch to lists
+            curr_batch_size = pred_keypoints_256.shape[0]
+            if not (preds['acc_PCK'].data.cpu().numpy().shape == (pck[my_step * batch_size:my_step * batch_size + curr_batch_size]).shape):
+                import pdb; pdb.set_trace()
+            pck[my_step * batch_size:my_step * batch_size + curr_batch_size] = preds['acc_PCK'].data.cpu().numpy()
+            acc_sil_2d[my_step * batch_size:my_step * batch_size + curr_batch_size] = preds['acc_IOU'].data.cpu().numpy()
+            for part in pck_by_part:
+                pck_by_part[part][my_step * batch_size:my_step * batch_size + curr_batch_size] = preds[f'{part}_PCK'].data.cpu().numpy()
+            all_betas[my_step * batch_size:my_step * batch_size + curr_batch_size, ...] = preds['betas']
+            all_betas_limbs[my_step * batch_size:my_step * batch_size + curr_batch_size, ...] = preds['betas_limbs']
+            all_z[my_step * batch_size:my_step * batch_size + curr_batch_size, ...] = preds['z']
+            all_pose_rotmat[my_step * batch_size:my_step * batch_size + curr_batch_size, ...] = preds['pose_rotmat']
+            all_flength[my_step * batch_size:my_step * batch_size + curr_batch_size, ...] = preds['flength']
+            all_trans[my_step * batch_size:my_step * batch_size + curr_batch_size, ...] = preds['trans']
+            all_breed_indices[my_step * batch_size:my_step * batch_size + curr_batch_size] = preds['breed_index']
+            all_image_names.extend(preds['image_names'])
+            # update progress bar
+            if progress is not None:
+                my_string = "PCK: {0:.2f}, IOU: {1:.2f}".format(
+                    pck[:(my_step * batch_size + curr_batch_size)].mean(),
+                    acc_sil_2d[:(my_step * batch_size + curr_batch_size)].mean())
+                progress.set_postfix_str(my_string)
+        else:
+            # measure accuracy and record loss
+            bs_fake = 1    # batch_size
+            losses.update(loss_dict['loss'], bs_fake)
+            losses_keyp.update(loss_dict['loss_keyp_weighted'], bs_fake)
+            losses_silh.update(loss_dict['loss_silh_weighted'], bs_fake)
+            losses_shape.update(loss_dict['loss_shape_weighted'], bs_fake)
+            losses_pose.update(loss_dict['loss_poseprior_weighted'], bs_fake)
+            losses_class.update(loss_dict['loss_class_weighted'], bs_fake)
+            losses_breed.update(loss_dict['loss_breed_weighted'], bs_fake)
+            losses_partseg.update(loss_dict['loss_partseg_weighted'], bs_fake)
+            acc = - loss_dict['loss_keyp_weighted']    # this will be used to keep track of the 'best model'
+            accuracies.update(acc, bs_fake)
+            # Show losses as part of the progress bar.
+            if progress is not None:
+                my_string = 'Loss: {loss:0.4f}, loss_keyp: {loss_keyp:0.4f}, loss_silh: {loss_silh:0.4f}, loss_partseg: {loss_partseg:0.4f}, loss_shape: {loss_shape:0.4f}, loss_pose: {loss_pose:0.4f}, loss_class: {loss_class:0.4f}, loss_breed: {loss_breed:0.4f}'.format(
+                    loss=losses.avg,
+                    loss_keyp=losses_keyp.avg,
+                    loss_silh=losses_silh.avg,
+                    loss_shape=losses_shape.avg,
+                    loss_pose=losses_pose.avg,
+                    loss_class=losses_class.avg,
+                    loss_breed=losses_breed.avg,
+                    loss_partseg=losses_partseg.avg
+                )
+                progress.set_postfix_str(my_string)
+        my_step += 1
+    if metrics == 'all':
+        summary = {'pck': pck, 'acc_sil_2d': acc_sil_2d, 'pck_by_part': pck_by_part,
+                   'betas': all_betas, 'betas_limbs': all_betas_limbs, 'z': all_z, 'pose_rotmat': all_pose_rotmat,
+                   'flenght': all_flength, 'trans': all_trans, 'image_names': all_image_names, 'breed_indices': all_breed_indices}
+        return my_string, summary
+    elif metrics == 'no_loss':
+        return my_string, np.average(np.asarray(acc_sil_2d))
+    else:
+        return my_string, accuracies.avg
+
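+# Editor's note -- an illustrative sketch, not part of the original file, of the
+# PCK metric used above. It assumes the WLDO-style convention in which the
+# distance threshold is pck_thresh times the square root of the ground-truth
+# silhouette area; Metrics.PCK in src/metrics/metrics.py is the authoritative
+# implementation.
+#
+#     def pck_sketch(pred_kp, gt_kp, gt_seg, thresh=0.15):
+#         # pred_kp, gt_kp: (n_kp, 2) arrays; gt_seg: (H, W) binary mask
+#         scale = gt_seg.sum() ** 0.5
+#         dist = (((pred_kp - gt_kp) ** 2).sum(axis=1)) ** 0.5
+#         return (dist < thresh * scale).mean()
+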
+# ---------------------------------------------------------------------------------------------------------------------------
+def do_visual_epoch(val_loader, model, device, data_info, flip=False, quiet=False, acc_joints=None, save_imgs_path=None, weight_dict=None, metrics=None, val_opt='default', test_name_list=None, render_all=False, pck_thresh=0.15, return_results=False):
+    if save_imgs_path is not None:
+        pathlib.Path(save_imgs_path).mkdir(parents=True, exist_ok=True)
+    all_results = []
+
+    # Put the model in evaluation mode.
+    model.eval()
+
+    iterable = enumerate(val_loader)
+
+    # information for normalization
+    norm_dict = {
+        'pose_rot6d_mean': torch.from_numpy(data_info.pose_rot6d_mean).float().to(device),
+        'trans_mean': torch.from_numpy(data_info.trans_mean).float().to(device),
+        'trans_std': torch.from_numpy(data_info.trans_std).float().to(device),
+        'flength_mean': torch.from_numpy(data_info.flength_mean).float().to(device),
+        'flength_std': torch.from_numpy(data_info.flength_std).float().to(device)}
+
+    for i, (input, target_dict) in iterable:
+        batch_size = input.shape[0]
+        input = input.float().to(device)
+        partial_results = {}
+
+        # ----------------------- do visualization step -----------------------
+        with torch.no_grad():
+            output, output_unnorm, output_reproj = model(input, norm_dict=norm_dict)
+
+        index = i
+        ind_img = 0
+        for ind_img in range(batch_size):    # range(min(12, batch_size)): # range(12): # [0]: # range(0, batch_size):
+            try:
+                if test_name_list is not None:
+                    img_name = test_name_list[int(target_dict['index'][ind_img].cpu().detach().numpy())].replace('/', '_')
+                    img_name = img_name.split('.')[0]
+                else:
+                    img_name = str(index) + '_' + str(ind_img)
+                partial_results['img_name'] = img_name
+                visualizations = model.render_vis_nograd(vertices=output_reproj['vertices_smal'],
+                                                         focal_lengths=output_unnorm['flength'],
+                                                         color=0)    # 2)
+                # save image with predicted keypoints
+                pred_unp = (output['keypoints_norm'][ind_img, :, :] + 1.) / 2 * (data_info.image_size - 1)
+                pred_unp_maxval = output['keypoints_scores'][ind_img, :, :]
+                pred_unp_prep = torch.cat((pred_unp, pred_unp_maxval), 1)
+                inp_img = input[ind_img, :, :, :].detach().clone()
+                if save_imgs_path is not None:
+                    out_path = save_imgs_path + '/keypoints_pred_' + img_name + '.png'
+                    save_input_image_with_keypoints(inp_img, pred_unp_prep, out_path=out_path, threshold=0.1, print_scores=True, ratio_in_out=1.0)    # threshold=0.3
+                # save predicted 3d model
+                #   (1) front view
+                pred_tex = visualizations[ind_img, :, :, :].permute((1, 2, 0)).cpu().detach().numpy() / 256
+                pred_tex_max = np.max(pred_tex, axis=2)
+                partial_results['tex_pred'] = pred_tex
+                if save_imgs_path is not None:
+                    out_path = save_imgs_path + '/tex_pred_' + img_name + '.png'
+                    plt.imsave(out_path, pred_tex)
+                input_image = input[ind_img, :, :, :].detach().clone()
+                for t, m, s in zip(input_image, data_info.rgb_mean, data_info.rgb_stddev): t.add_(m)
+                input_image_np = input_image.detach().cpu().numpy().transpose(1, 2, 0)
+                im_masked = cv2.addWeighted(input_image_np, 0.2, pred_tex, 0.8, 0)
+                im_masked[pred_tex_max<0.01, :] = input_image_np[pred_tex_max<0.01, :]
+                partial_results['comp_pred'] = im_masked
+                if save_imgs_path is not None:
+                    out_path = save_imgs_path + '/comp_pred_' + img_name + '.png'
+                    plt.imsave(out_path, im_masked)
+                #   (2) side view
+                vertices_cent = output_reproj['vertices_smal'] - output_reproj['vertices_smal'].mean(dim=1)[:, None, :]
+                roll = np.pi / 2 * torch.ones(1).float().to(device)
+                pitch = np.pi / 2 * torch.ones(1).float().to(device)
+                tensor_0 = torch.zeros(1).float().to(device)
+                tensor_1 = torch.ones(1).float().to(device)
+                RX = torch.stack([torch.stack([tensor_1, tensor_0, tensor_0]), torch.stack([tensor_0, torch.cos(roll), -torch.sin(roll)]), torch.stack([tensor_0, torch.sin(roll), torch.cos(roll)])]).reshape(3, 3)    # rotation around x (currently unused, only RY is applied below)
+                RY = torch.stack([
+                    torch.stack([torch.cos(pitch), tensor_0, torch.sin(pitch)]),
+                    torch.stack([tensor_0, tensor_1, tensor_0]),
+                    torch.stack([-torch.sin(pitch), tensor_0, torch.cos(pitch)])]).reshape(3, 3)
+                vertices_rot = (torch.matmul(RY, vertices_cent.reshape((-1, 3))[:, :, None])).reshape((batch_size, -1, 3))
+                vertices_rot[:, :, 2] = vertices_rot[:, :, 2] + torch.ones_like(vertices_rot[:, :, 2]) * 20    # 18 # *16
+                visualizations_rot = model.render_vis_nograd(vertices=vertices_rot,
+                                                             focal_lengths=output_unnorm['flength'],
+                                                             color=0)    # 2)
+                pred_tex = visualizations_rot[ind_img, :, :, :].permute((1, 2, 0)).cpu().detach().numpy() / 256
+                pred_tex_max = np.max(pred_tex, axis=2)
+                partial_results['rot_tex_pred'] = pred_tex
+                if save_imgs_path is not None:
+                    out_path = save_imgs_path + '/rot_tex_pred_' + img_name + '.png'
+                    plt.imsave(out_path, pred_tex)
+                render_all = True
+                if render_all:
+                    # save input image
+                    inp_img = input[ind_img, :, :, :].detach().clone()
+                    if save_imgs_path is not None:
+                        out_path = save_imgs_path + '/image_' + img_name + '.png'
+                        save_input_image(inp_img, out_path)
+                    # save posed mesh
+                    V_posed = output_reproj['vertices_smal'][ind_img, :, :].detach().cpu().numpy()
+                    Faces = model.smal.f
+                    mesh_posed = trimesh.Trimesh(vertices=V_posed, faces=Faces, process=False)
+                    partial_results['mesh_posed'] = mesh_posed
+                    if save_imgs_path is not None:
+                        mesh_posed.export(save_imgs_path + '/mesh_posed_' + img_name + '.obj')
+            except Exception as e:
+                print('skipping the visualizations for this image: ' + str(e))
+        all_results.append(partial_results)
+    if return_results:
+        return all_results
+    else:
+        return
src/configs/SMAL_configs.py ADDED
@@ -0,0 +1,165 @@
+
+import numpy as np
+import os
+import sys
+
+
+# SMAL_DATA_DIR = '/is/cluster/work/nrueegg/dog_project/pytorch-dogs-inference/src/smal_pytorch/smpl_models/'
+# SMAL_DATA_DIR = os.path.join(os.path.dirname(__file__), '..', 'smal_pytorch', 'smal_data')
+SMAL_DATA_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'smal_data')
+
+# we replace the old SMAL model by a more dog specific model (see BARC cvpr 2022 paper)
+# our model has several differences compared to the original SMAL model, some of them are:
+# - the PCA shape space is recalculated (from partially new data and weighted)
+# - coefficients for limb length changes are allowed (similar to WLDO, we did borrow some of their code)
+# - all dogs have a core of approximately the same length
+# - dogs are centered in their root joint (which is close to the tail base)
+#   -> like this the root rotation is always around this joint AND (0, 0, 0)
+#   -> before this it would happen that the animal 'slips' from the image middle to the side when rotating it.
+#      Now 'trans' also defines the center of the rotation
+# - we correct the back joint locations such that all those joints are more aligned
+SMAL_MODEL_PATH = os.path.join(SMAL_DATA_DIR, 'my_smpl_SMBLD_nbj_v3.pkl')
+UNITY_SMAL_SHAPE_PRIOR_DOGS = os.path.join(SMAL_DATA_DIR, 'my_smpl_data_SMBLD_v3.pkl')
+
+SYMMETRY_INDS_FILE = os.path.join(SMAL_DATA_DIR, 'symmetry_inds.json')
+
+mean_dog_bone_lengths_txt = os.path.join(SMAL_DATA_DIR, 'mean_dog_bone_lengths.txt')
+
+# there exist different keypoint configurations, for example keypoints corresponding to SMAL joints or keypoints defined based on vertex locations
+KEYPOINT_CONFIGURATION = 'green'    # green: same as in https://github.com/benjiebob/SMALify/blob/master/config.py
+
+# some vertex indices (from Silvia Zuffi's code, create_projected_images_cats.py)
+KEY_VIDS = np.array(([1068, 1080, 1029, 1226],    # left eye
+                     [2660, 3030, 2675, 3038],    # right eye
+                     [910],    # mouth low
+                     [360, 1203, 1235, 1230],    # front left leg, low
+                     [3188, 3156, 2327, 3183],    # front right leg, low
+                     [1976, 1974, 1980, 856],    # back left leg, low
+                     [3854, 2820, 3852, 3858],    # back right leg, low
+                     [452, 1811],    # tail start
+                     [416, 235, 182],    # front left leg, top
+                     [2156, 2382, 2203],    # front right leg, top
+                     [829],    # back left leg, top
+                     [2793],    # back right leg, top
+                     [60, 114, 186, 59],    # throat, close to base of neck
+                     [2091, 2037, 2036, 2160],    # withers (a bit lower than in reality)
+                     [384, 799, 1169, 431],    # front left leg, middle
+                     [2351, 2763, 2397, 3127],    # front right leg, middle
+                     [221, 104],    # back left leg, middle
+                     [2754, 2192],    # back right leg, middle
+                     [191, 1158, 3116, 2165],    # neck
+                     [28],    # Tail tip
+                     [542],    # Left Ear
+                     [2507],    # Right Ear
+                     [1039, 1845, 1846, 1870, 1879, 1919, 2997, 3761, 3762],    # nose tip
+                     [0, 464, 465, 726, 1824, 2429, 2430, 2690]), dtype=object)    # half tail
+
+# the following vertices are used for visibility only: if one of the vertices is visible,
+# then we assume that the joint is visible! There is some noise, but we don't care, as this is
+# for generation of the synthetic dataset only
+KEY_VIDS_VISIBILITY_ONLY = np.array(([1068, 1080, 1029, 1226, 645],    # left eye
+                                     [2660, 3030, 2675, 3038, 2567],    # right eye
+                                     [910, 11, 5],    # mouth low
+                                     [360, 1203, 1235, 1230, 298, 408, 303, 293, 384],    # front left leg, low
+                                     [3188, 3156, 2327, 3183, 2261, 2271, 2573, 2265],    # front right leg, low
+                                     [1976, 1974, 1980, 856, 559, 851, 556],    # back left leg, low
+                                     [3854, 2820, 3852, 3858, 2524, 2522, 2815, 2072],    # back right leg, low
+                                     [452, 1811, 63, 194, 52, 370, 64],    # tail start
+                                     [416, 235, 182, 440, 8, 80, 73, 112],    # front left leg, top
+                                     [2156, 2382, 2203, 2050, 2052, 2406, 3],    # front right leg, top
+                                     [829, 219, 218, 173, 17, 7, 279],    # back left leg, top
+                                     [2793, 582, 140, 87, 2188, 2147, 2063],    # back right leg, top
+                                     [60, 114, 186, 59, 878, 130, 189, 45],    # throat, close to base of neck
+                                     [2091, 2037, 2036, 2160, 190, 2164],    # withers (a bit lower than in reality)
+                                     [384, 799, 1169, 431, 321, 314, 437, 310, 323],    # front left leg, middle
+                                     [2351, 2763, 2397, 3127, 2278, 2285, 2282, 2275, 2359],    # front right leg, middle
+                                     [221, 104, 105, 97, 103],    # back left leg, middle
+                                     [2754, 2192, 2080, 2251, 2075, 2074],    # back right leg, middle
+                                     [191, 1158, 3116, 2165, 154, 653, 133, 339],    # neck
+                                     [28, 474, 475, 731, 24],    # Tail tip
+                                     [542, 147, 509, 200, 522],    # Left Ear
+                                     [2507, 2174, 2122, 2126, 2474],    # Right Ear
+                                     [1039, 1845, 1846, 1870, 1879, 1919, 2997, 3761, 3762],    # nose tip
+                                     [0, 464, 465, 726, 1824, 2429, 2430, 2690]), dtype=object)    # half tail
+
+# see: https://github.com/benjiebob/SMALify/blob/master/config.py
+# JOINT DEFINITIONS - based on SMAL joints and additional {eyes, ear tips, chin and nose}
+TORSO_JOINTS = [2, 5, 8, 11, 12, 23]
+CANONICAL_MODEL_JOINTS = [
+    10, 9, 8,    # upper_left [paw, middle, top]
+    20, 19, 18,    # lower_left [paw, middle, top]
+    14, 13, 12,    # upper_right [paw, middle, top]
+    24, 23, 22,    # lower_right [paw, middle, top]
+    25, 31,    # tail [start, end]
+    33, 34,    # ear base [left, right]
+    35, 36,    # nose, chin
+    38, 37,    # ear tip [left, right]
+    39, 40,    # eyes [left, right]
+    6, 11,    # withers, throat (throat is inaccurate and withers also)
+    28]    # tail middle
+# old: 15, 15,    # withers, throat (TODO: Labelled same as throat for now), throat
+
+
+
+# the following list gives the indices of the KEY_VIDS_JOINTS that must be taken in order
+# to judge if the CANONICAL_MODEL_JOINTS are visible - those are all approximations!
+CMJ_VISIBILITY_IN_KEY_VIDS = [
+    3, 14, 8,    # left front leg
+    5, 16, 10,    # left rear leg
+    4, 15, 9,    # right front leg
+    6, 17, 11,    # right rear leg
+    7, 19,    # tail front, tail back
+    20, 21,    # ear base (but can not be found in blue, so we take the tip)
+    2, 2,    # mouth (was: 22, 2)
+    20, 21,    # ear tips
+    1, 0,    # eyes
+    18,    # withers, not sure where this point is
+    12,    # throat
+    23,    # mid tail
+    ]
+
+# define which bone lengths are used as input to the 2d-to-3d network
+IDXS_BONES_NO_REDUNDANCY = [6, 7, 8, 9, 16, 17, 18, 19, 32, 1, 2, 3, 4, 5, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31]
+# load bone lengths of the mean dog (already filtered)
+mean_dog_bone_lengths = []
+with open(mean_dog_bone_lengths_txt, 'r') as f:
+    for line in f:
+        mean_dog_bone_lengths.append(float(line.split('\n')[0]))
+MEAN_DOG_BONE_LENGTHS_NO_RED = np.asarray(mean_dog_bone_lengths)[IDXS_BONES_NO_REDUNDANCY]    # (24, )
+
+# Body part segmentation:
+#   the body can be segmented based on the bones and for the new dog model also based on the new shapedirs
+#   axis_horizontal = self.shapedirs[2, :].reshape((-1, 3))[:, 0]
+#   all_indices = np.arange(3889)
+#   tail_indices = all_indices[axis_horizontal.detach().cpu().numpy() < 0.0]
+VERTEX_IDS_TAIL = [   0,    4,    9,   10,   24,   25,   28,  453,  454,  456,  457,
+                    458,  459,  460,  461,  462,  463,  464,  465,  466,  467,  468,
+                    469,  470,  471,  472,  473,  474,  475,  724,  725,  726,  727,
+                    728,  729,  730,  731,  813,  975,  976,  977, 1109, 1110, 1111,
+                   1811, 1813, 1819, 1820, 1821, 1822, 1823, 1824, 1825, 1826, 1827,
+                   1828, 1835, 1836, 1960, 1961, 1962, 1963, 1964, 1965, 1966, 1967,
+                   1968, 1969, 2418, 2419, 2421, 2422, 2423, 2424, 2425, 2426, 2427,
+                   2428, 2429, 2430, 2431, 2432, 2433, 2434, 2435, 2436, 2437, 2438,
+                   2439, 2440, 2688, 2689, 2690, 2691, 2692, 2693, 2694, 2695, 2777,
+                   3067, 3068, 3069, 3842, 3843, 3844, 3845, 3846, 3847]
+
+# same as in https://github.com/benjiebob/WLDO/blob/master/global_utils/config.py
+EVAL_KEYPOINTS = [
+    0, 1, 2,    # left front
+    3, 4, 5,    # left rear
+    6, 7, 8,    # right front
+    9, 10, 11,    # right rear
+    12, 13,    # tail start -> end
+    14, 15,    # left ear, right ear
+    16, 17,    # nose, chin
+    18, 19]    # left tip, right tip
+
+KEYPOINT_GROUPS = {
+    'legs': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],    # legs
+    'tail': [12, 13],    # tail
+    'ears': [14, 15, 18, 19],    # ears
+    'face': [16, 17]    # face
+    }
+
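+# Editor's note -- a small sanity check, not part of the original file: the four
+# groups above partition the 20 evaluation keypoints exactly.
+#
+#     assert sorted(sum(KEYPOINT_GROUPS.values(), [])) == EVAL_KEYPOINTS
+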
src/configs/anipose_data_info.py ADDED
@@ -0,0 +1,74 @@
+from dataclasses import dataclass
+from typing import List
+import json
+import numpy as np
+import os
+
+STATISTICS_DATA_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'statistics')
+STATISTICS_PATH = os.path.join(STATISTICS_DATA_DIR, 'statistics_modified_v1.json')
+
+@dataclass
+class DataInfo:
+    rgb_mean: List[float]
+    rgb_stddev: List[float]
+    joint_names: List[str]
+    hflip_indices: List[int]
+    n_joints: int
+    n_keyp: int
+    n_bones: int
+    n_betas: int
+    image_size: int
+    trans_mean: np.ndarray
+    trans_std: np.ndarray
+    flength_mean: np.ndarray
+    flength_std: np.ndarray
+    pose_rot6d_mean: np.ndarray
+    keypoint_weights: List[float]
+
+# SMAL samples 3d statistics
+# statistics such as mean values were computed once when the project was started and have not been changed since
+def load_statistics(statistics_path):
+    with open(statistics_path) as f:
+        statistics = json.load(f)
+    '''new_pose_mean = [[[np.round(val, 2) for val in sublst] for sublst in sublst_big] for sublst_big in statistics['pose_mean']]
+    statistics['pose_mean'] = new_pose_mean
+    j_out = json.dumps(statistics, indent=4)    #, sort_keys=True)
+    with open(self.statistics_path, 'w') as file: file.write(j_out)'''
+    new_statistics = {'trans_mean': np.asarray(statistics['trans_mean']),
+                      'trans_std': np.asarray(statistics['trans_std']),
+                      'flength_mean': np.asarray(statistics['flength_mean']),
+                      'flength_std': np.asarray(statistics['flength_std']),
+                      'pose_mean': np.asarray(statistics['pose_mean']),
+                      }
+    new_statistics['pose_rot6d_mean'] = new_statistics['pose_mean'][:, :, :2].reshape((-1, 6))
+    return new_statistics
+STATISTICS = load_statistics(STATISTICS_PATH)
+
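+# Editor's note, not part of the original file: 'pose_rot6d_mean' above keeps
+# the first two columns of each stored 3x3 mean rotation matrix, i.e. the
+# continuous 6D rotation representation of Zhou et al. (CVPR 2019); the repo's
+# rot6d_to_rotmat / rotmat_to_rot6d helpers (used in
+# src/combined_model/model_shape_v7.py) convert back and forth. A toy version:
+#
+#     R = np.eye(3)                   # any 3x3 rotation matrix
+#     rot6d = R[:, :2].reshape(6)     # 6D representation: first two columns
+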
+AniPose_JOINT_NAMES_swapped = [
+    'L_F_Paw', 'L_F_Knee', 'L_F_Elbow',
+    'L_B_Paw', 'L_B_Knee', 'L_B_Elbow',
+    'R_F_Paw', 'R_F_Knee', 'R_F_Elbow',
+    'R_B_Paw', 'R_B_Knee', 'R_B_Elbow',
+    'TailBase', '_Tail_end_', 'L_EarBase', 'R_EarBase',
+    'Nose', '_Chin_', '_Left_ear_tip_', '_Right_ear_tip_',
+    'L_Eye', 'R_Eye', 'Withers', 'Throat']
+
+KEYPOINT_WEIGHTS = [3, 2, 2, 3, 2, 2, 3, 2, 2, 3, 2, 2, 3, 3, 2, 2, 3, 1, 2, 2]
+
+COMPLETE_DATA_INFO = DataInfo(
+    rgb_mean=[0.4404, 0.4440, 0.4327],    # not sure
+    rgb_stddev=[0.2458, 0.2410, 0.2468],    # not sure
+    joint_names=AniPose_JOINT_NAMES_swapped,    # AniPose_JOINT_NAMES,
+    hflip_indices=[6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5, 12, 13, 15, 14, 16, 17, 19, 18, 21, 20, 22, 23],
+    n_joints = 35,
+    n_keyp = 24,    # 20, # 25,
+    n_bones = 24,
+    n_betas = 30,    # 10,
+    image_size = 256,
+    trans_mean = STATISTICS['trans_mean'],
+    trans_std = STATISTICS['trans_std'],
+    flength_mean = STATISTICS['flength_mean'],
+    flength_std = STATISTICS['flength_std'],
+    pose_rot6d_mean = STATISTICS['pose_rot6d_mean'],
+    keypoint_weights = KEYPOINT_WEIGHTS
+)
src/configs/barc_cfg_defaults.py ADDED
@@ -0,0 +1,111 @@
+
+from yacs.config import CfgNode as CN
+import argparse
+import yaml
+import os
+
+abs_barc_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',))
+
+_C = CN()
+_C.barc_dir = abs_barc_dir
+_C.device = 'cuda'
+
+## path settings
+_C.paths = CN()
+_C.paths.ROOT_OUT_PATH = abs_barc_dir + '/results/'
+_C.paths.ROOT_CHECKPOINT_PATH = abs_barc_dir + '/checkpoint/'
+_C.paths.MODELPATH_NORMFLOW = abs_barc_dir + '/checkpoint/barc_normflow_pret/rgbddog_v3_model.pt'
+
+## parameter settings
+_C.params = CN()
+_C.params.ARCH = 'hg8'
+_C.params.STRUCTURE_POSE_NET = 'normflow'    # 'default' # 'vae'
+_C.params.NF_VERSION = 3
+_C.params.N_JOINTS = 35
+_C.params.N_KEYP = 24    # 20
+_C.params.N_SEG = 2
+_C.params.N_PARTSEG = 15
+_C.params.UPSAMPLE_SEG = True
+_C.params.ADD_PARTSEG = True    # partseg: for the CVPR paper this part of the network exists, but is not trained (no part labels in StanExt)
+_C.params.N_BETAS = 30    # 10
+_C.params.N_BETAS_LIMBS = 7
+_C.params.N_BONES = 24
+_C.params.N_BREEDS = 121    # 120 breeds plus background
+_C.params.IMG_SIZE = 256
+_C.params.SILH_NO_TAIL = False
+_C.params.KP_THRESHOLD = None
+_C.params.ADD_Z_TO_3D_INPUT = False
+_C.params.N_SEGBPS = 64*2
+_C.params.ADD_SEGBPS_TO_3D_INPUT = True
+_C.params.FIX_FLENGTH = False
+_C.params.RENDER_ALL = True
+_C.params.VLIN = 2
+_C.params.STRUCTURE_Z_TO_B = 'lin'
+_C.params.N_Z_FREE = 64
+_C.params.PCK_THRESH = 0.15
+
+## optimization settings
+_C.optim = CN()
+_C.optim.LR = 5e-4
+_C.optim.SCHEDULE = [150, 175, 200]
+_C.optim.GAMMA = 0.1
+_C.optim.MOMENTUM = 0
+_C.optim.WEIGHT_DECAY = 0
+_C.optim.EPOCHS = 220
+_C.optim.BATCH_SIZE = 12    # keep 12 (needs to be an even number, as we have a custom data sampler)
+_C.optim.TRAIN_PARTS = 'all_without_shapedirs'
+
+## dataset settings
+_C.data = CN()
+_C.data.DATASET = 'stanext24'
+_C.data.V12 = True
+_C.data.SHORTEN_VAL_DATASET_TO = None
+_C.data.VAL_OPT = 'val'
+_C.data.VAL_METRICS = 'no_loss'
+
+# ---------------------------------------
+def update_dependent_vars(cfg):
+    cfg.params.N_CLASSES = cfg.params.N_KEYP + cfg.params.N_SEG
+    if cfg.params.VLIN == 0:
+        cfg.params.NUM_STAGE_COMB = 2
+        cfg.params.NUM_STAGE_HEADS = 1
+        cfg.params.NUM_STAGE_HEADS_POSE = 1
+        cfg.params.TRANS_SEP = False
+    elif cfg.params.VLIN == 1:
+        cfg.params.NUM_STAGE_COMB = 3
+        cfg.params.NUM_STAGE_HEADS = 1
+        cfg.params.NUM_STAGE_HEADS_POSE = 2
+        cfg.params.TRANS_SEP = False
+    elif cfg.params.VLIN == 2:
+        cfg.params.NUM_STAGE_COMB = 3
+        cfg.params.NUM_STAGE_HEADS = 1
+        cfg.params.NUM_STAGE_HEADS_POSE = 2
+        cfg.params.TRANS_SEP = True
+    else:
+        raise NotImplementedError
+    if cfg.params.STRUCTURE_Z_TO_B == '1dconv':
+        cfg.params.N_Z = cfg.params.N_BETAS + cfg.params.N_BETAS_LIMBS
+    else:
+        cfg.params.N_Z = cfg.params.N_Z_FREE
+    return
+
+
+update_dependent_vars(_C)
+global _cfg_global
+_cfg_global = _C.clone()
+
+
+def get_cfg_defaults():
+    # Get a yacs CfgNode object with default values as defined within this file.
+    # Return a clone so that the defaults will not be altered.
+    return _C.clone()
+
+def update_cfg_global_with_yaml(cfg_yaml_file):
+    _cfg_global.merge_from_file(cfg_yaml_file)
+    update_dependent_vars(_cfg_global)
+    return
+
+def get_cfg_global_updated():
+    # return _cfg_global.clone()
+    return _cfg_global
+
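+# Editor's note -- an illustrative usage sketch, not part of the original file;
+# the YAML file name is hypothetical, and any keys it contains override the
+# defaults defined above.
+#
+#     from configs.barc_cfg_defaults import \
+#         get_cfg_defaults, update_cfg_global_with_yaml, get_cfg_global_updated
+#     cfg = get_cfg_defaults()                            # pristine defaults
+#     update_cfg_global_with_yaml('barc_cfg_train.yaml')  # hypothetical yaml
+#     cfg = get_cfg_global_updated()                      # defaults + yaml merged
+#     print(cfg.params.N_CLASSES, cfg.optim.LR)
+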
src/configs/barc_loss_weights.json ADDED
@@ -0,0 +1,30 @@
+
+
+
+{
+    "breed_options": [
+        "4"
+    ],
+    "breed": 5.0,
+    "class": 1.0,
+    "models3d": 1.0,
+    "keyp": 0.2,
+    "silh": 50.0,
+    "shape_options": [
+        "smal",
+        "limbs7"
+    ],
+    "shape": [
+        1e-05,
+        1
+    ],
+    "poseprior_options": [
+        "normalizing_flow_tiger_logprob"
+    ],
+    "poseprior": 0.1,
+    "poselegssidemovement": 10.0,
+    "flength": 1.0,
+    "partseg": 0,
+    "shapedirs": 0,
+    "pose_0": 0.0
+}
src/configs/data_info.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+ from typing import List
3
+ import json
4
+ import numpy as np
5
+ import os
6
+ import sys
7
+
8
+ STATISTICS_DATA_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'statistics')
9
+ STATISTICS_PATH = os.path.join(STATISTICS_DATA_DIR, 'statistics_modified_v1.json')
10
+
11
+ @dataclass
12
+ class DataInfo:
13
+ rgb_mean: List[float]
14
+ rgb_stddev: List[float]
15
+ joint_names: List[str]
16
+ hflip_indices: List[int]
17
+ n_joints: int
18
+ n_keyp: int
19
+ n_bones: int
20
+ n_betas: int
21
+ image_size: int
22
+ trans_mean: np.ndarray
23
+ trans_std: np.ndarray
24
+ flength_mean: np.ndarray
25
+ flength_std: np.ndarray
26
+ pose_rot6d_mean: np.ndarray
27
+ keypoint_weights: List[float]
28
+
29
+ # SMAL samples 3d statistics
30
+ # statistics such as mean values were computed once at the start of the project and have not been changed since
31
+ def load_statistics(statistics_path):
32
+ with open(statistics_path) as f:
33
+ statistics = json.load(f)
34
+ '''new_pose_mean = [[[np.round(val, 2) for val in sublst] for sublst in sublst_big] for sublst_big in statistics['pose_mean']]
35
+ statistics['pose_mean'] = new_pose_mean
36
+ j_out = json.dumps(statistics, indent=4) #, sort_keys=True)
37
+ with open(self.statistics_path, 'w') as file: file.write(j_out)'''
38
+ new_statistics = {'trans_mean': np.asarray(statistics['trans_mean']),
39
+ 'trans_std': np.asarray(statistics['trans_std']),
40
+ 'flength_mean': np.asarray(statistics['flength_mean']),
41
+ 'flength_std': np.asarray(statistics['flength_std']),
42
+ 'pose_mean': np.asarray(statistics['pose_mean']),
43
+ }
44
+ new_statistics['pose_rot6d_mean'] = new_statistics['pose_mean'][:, :, :2].reshape((-1, 6))
45
+ return new_statistics
46
+ STATISTICS = load_statistics(STATISTICS_PATH)
47
+
48
+
49
+ ############################################################################
50
+ # for StanExt (original number of keypoints, 20 not 24)
51
+
52
+ # for keypoint names see: https://github.com/benjiebob/StanfordExtra/blob/master/keypoint_definitions.csv
53
+ StanExt_JOINT_NAMES = [
54
+ 'Left_front_leg_paw', 'Left_front_leg_middle_joint', 'Left_front_leg_top',
55
+ 'Left_rear_leg_paw', 'Left_rear_leg_middle_joint', 'Left_rear_leg_top',
56
+ 'Right_front_leg_paw', 'Right_front_leg_middle_joint', 'Right_front_leg_top',
57
+ 'Right_rear_leg_paw', 'Right_rear_leg_middle_joint', 'Right_rear_leg_top',
58
+ 'Tail_start', 'Tail_end', 'Base_of_left_ear', 'Base_of_right_ear',
59
+ 'Nose', 'Chin', 'Left_ear_tip', 'Right_ear_tip']
60
+
61
+ KEYPOINT_WEIGHTS = [3, 2, 2, 3, 2, 2, 3, 2, 2, 3, 2, 2, 3, 3, 2, 2, 3, 1, 2, 2]
62
+
63
+ COMPLETE_DATA_INFO = DataInfo(
64
+ rgb_mean=[0.4404, 0.4440, 0.4327], # not sure
65
+ rgb_stddev=[0.2458, 0.2410, 0.2468], # not sure
66
+ joint_names=StanExt_JOINT_NAMES,
67
+ hflip_indices=[6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5, 12, 13, 15, 14, 16, 17, 19, 18],
68
+ n_joints = 35,
69
+ n_keyp = 20, # 25,
70
+ n_bones = 24,
71
+ n_betas = 30, # 10,
72
+ image_size = 256,
73
+ trans_mean = STATISTICS['trans_mean'],
74
+ trans_std = STATISTICS['trans_std'],
75
+ flength_mean = STATISTICS['flength_mean'],
76
+ flength_std = STATISTICS['flength_std'],
77
+ pose_rot6d_mean = STATISTICS['pose_rot6d_mean'],
78
+ keypoint_weights = KEYPOINT_WEIGHTS
79
+ )
80
+
81
+
82
+ ############################################################################
83
+ # new for StanExt24
84
+
85
+ # ..., 'Left_eye', 'Right_eye', 'Withers', 'Throat'] # the last 4 keypoints are in the animal_pose dataset, but not StanfordExtra
86
+ StanExt_JOINT_NAMES_24 = [
87
+ 'Left_front_leg_paw', 'Left_front_leg_middle_joint', 'Left_front_leg_top',
88
+ 'Left_rear_leg_paw', 'Left_rear_leg_middle_joint', 'Left_rear_leg_top',
89
+ 'Right_front_leg_paw', 'Right_front_leg_middle_joint', 'Right_front_leg_top',
90
+ 'Right_rear_leg_paw', 'Right_rear_leg_middle_joint', 'Right_rear_leg_top',
91
+ 'Tail_start', 'Tail_end', 'Base_of_left_ear', 'Base_of_right_ear',
92
+ 'Nose', 'Chin', 'Left_ear_tip', 'Right_ear_tip',
93
+ 'Left_eye', 'Right_eye', 'Withers', 'Throat']
94
+
95
+ KEYPOINT_WEIGHTS_24 = [3, 2, 2, 3, 2, 2, 3, 2, 2, 3, 2, 2, 3, 3, 2, 2, 3, 1, 2, 2, 1, 1, 0, 0]
96
+
97
+ COMPLETE_DATA_INFO_24 = DataInfo(
98
+ rgb_mean=[0.4404, 0.4440, 0.4327], # not sure
99
+ rgb_stddev=[0.2458, 0.2410, 0.2468], # not sure
100
+ joint_names=StanExt_JOINT_NAMES_24,
101
+ hflip_indices=[6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5, 12, 13, 15, 14, 16, 17, 19, 18, 21, 20, 22, 23],
102
+ n_joints = 35,
103
+ n_keyp = 24, # 20, # 25,
104
+ n_bones = 24,
105
+ n_betas = 30, # 10,
106
+ image_size = 256,
107
+ trans_mean = STATISTICS['trans_mean'],
108
+ trans_std = STATISTICS['trans_std'],
109
+ flength_mean = STATISTICS['flength_mean'],
110
+ flength_std = STATISTICS['flength_std'],
111
+ pose_rot6d_mean = STATISTICS['pose_rot6d_mean'],
112
+ keypoint_weights = KEYPOINT_WEIGHTS_24
113
+ )
114
+
115
+
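
A small sketch of how the fields of COMPLETE_DATA_INFO_24 are typically used (image normalization and keypoint reordering after a horizontal flip); the tensors below are random stand-ins, and src/ is assumed to be on sys.path:

    import torch
    from configs.data_info import COMPLETE_DATA_INFO_24 as info

    img = torch.rand(3, info.image_size, info.image_size)
    mean = torch.tensor(info.rgb_mean).view(3, 1, 1)
    std = torch.tensor(info.rgb_stddev).view(3, 1, 1)
    img_norm = (img - mean) / std

    keyp = torch.rand(info.n_keyp, 3)        # (x, y, visibility)
    keyp_flip = keyp[info.hflip_indices]     # swap left/right keypoints
    keyp_flip[:, 0] = info.image_size - 1 - keyp_flip[:, 0]
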
src/configs/dataset_path_configs.py ADDED
@@ -0,0 +1,21 @@
1
+
2
+
3
+ import numpy as np
4
+ import os
5
+ import sys
6
+
7
+ abs_barc_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',))
8
+
9
+ # stanext dataset
10
+ # (1) path to stanext dataset
11
+ STAN_V12_ROOT_DIR = abs_barc_dir + '/datasets/StanfordExtra_V12/'
12
+ IMG_V12_DIR = os.path.join(STAN_V12_ROOT_DIR, 'StanExtV12_Images')
13
+ JSON_V12_DIR = os.path.join(STAN_V12_ROOT_DIR, 'labels', "StanfordExtra_v12.json")
14
+ STAN_V12_TRAIN_LIST_DIR = os.path.join(STAN_V12_ROOT_DIR, 'labels', 'train_stanford_StanfordExtra_v12.npy')
15
+ STAN_V12_VAL_LIST_DIR = os.path.join(STAN_V12_ROOT_DIR, 'labels', 'val_stanford_StanfordExtra_v12.npy')
16
+ STAN_V12_TEST_LIST_DIR = os.path.join(STAN_V12_ROOT_DIR, 'labels', 'test_stanford_StanfordExtra_v12.npy')
17
+ # (2) path to related data such as breed indices and prepared predictions for withers, throat and eye keypoints
18
+ STANEXT_RELATED_DATA_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'stanext_related_data')
19
+
20
+ # image crop dataset (for demo, visualization)
21
+ TEST_IMAGE_CROP_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'datasets', 'test_image_crops')
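
A quick sanity check of the directory layout these constants assume (nothing here creates files; StanfordExtra itself must be downloaded separately):

    import os
    from configs.dataset_path_configs import IMG_V12_DIR, JSON_V12_DIR, \
        TEST_IMAGE_CROP_ROOT_DIR

    for path in (IMG_V12_DIR, JSON_V12_DIR, TEST_IMAGE_CROP_ROOT_DIR):
        print(path, '->', 'found' if os.path.exists(path) else 'missing')
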
src/configs/dog_breeds/dog_breed_class.py ADDED
@@ -0,0 +1,170 @@
1
+
2
+ import os
3
+ import warnings
4
+ warnings.filterwarnings("ignore", category=DeprecationWarning)
5
+ import pandas as pd
6
+ import difflib
7
+ import json
8
+ import pickle as pkl
9
+ import csv
10
+ import numpy as np
11
+
12
+
13
+ # ----------------------------------------------------------------------------------------------------------------- #
14
+ class DogBreed(object):
15
+ def __init__(self, abbrev, name_akc=None, name_stanext=None, name_xlsx=None, path_akc=None, path_stanext=None, ind_in_xlsx=None, ind_in_xlsx_matrix=None, ind_in_stanext=None, clade=None):
16
+ self._abbrev = abbrev
17
+ self._name_xlsx = name_xlsx
18
+ self._name_akc = name_akc
19
+ self._name_stanext = name_stanext
20
+ self._path_stanext = path_stanext
21
+ self._additional_names = set()
22
+ if self._name_akc is not None:
23
+ self.add_akc_info(name_akc, path_akc)
24
+ if self._name_stanext is not None:
25
+ self.add_stanext_info(name_stanext, path_stanext, ind_in_stanext)
26
+ if self._name_xlsx is not None:
27
+ self.add_xlsx_info(name_xlsx, ind_in_xlsx, ind_in_xlsx_matrix, clade)
28
+ def add_xlsx_info(self, name_xlsx, ind_in_xlsx, ind_in_xlsx_matrix, clade):
29
+ assert (name_xlsx is not None) and (ind_in_xlsx is not None) and (ind_in_xlsx_matrix is not None) and (clade is not None)
30
+ self._name_xlsx = name_xlsx
31
+ self._ind_in_xlsx = ind_in_xlsx
32
+ self._ind_in_xlsx_matrix = ind_in_xlsx_matrix
33
+ self._clade = clade
34
+ def add_stanext_info(self, name_stanext, path_stanext, ind_in_stanext):
35
+ assert (name_stanext is not None) and (path_stanext is not None) and (ind_in_stanext is not None)
36
+ self._name_stanext = name_stanext
37
+ self._path_stanext = path_stanext
38
+ self._ind_in_stanext = ind_in_stanext
39
+ def add_akc_info(self, name_akc, path_akc):
40
+ assert (name_akc is not None) and (path_akc is not None)
41
+ self._name_akc = name_akc
42
+ self._path_akc = path_akc
43
+ def add_additional_names(self, name_list):
44
+ self._additional_names = self._additional_names.union(set(name_list))
45
+ def add_text_info(self, text_height, text_weight, text_life_exp):
46
+ self._text_height = text_height
47
+ self._text_weight = text_weight
48
+ self._text_life_exp = text_life_exp
49
+ def get_datasets(self):
50
+ # all datasets in which this breed is found
51
+ datasets = set()
52
+ if self._name_akc is not None:
53
+ datasets.add('akc')
54
+ if self._name_stanext is not None:
55
+ datasets.add('stanext')
56
+ if self._name_xlsx is not None:
57
+ datasets.add('xlsx')
58
+ return datasets
59
+ def get_names(self):
60
+ # set of names for this breed
61
+ names = {self._abbrev, self._name_akc, self._name_stanext, self._name_xlsx, self._path_stanext}.union(self._additional_names)
62
+ names.discard(None)
63
+ return names
64
+ def get_names_as_pointing_dict(self):
65
+ # each name points to the abbreviation
66
+ names = self.get_names()
67
+ my_dict = {}
68
+ for name in names:
69
+ my_dict[name] = self._abbrev
70
+ return my_dict
71
+ def print_overview(self):
72
+ # print important information to get an overview of the class instance
73
+ if self._name_akc is not None:
74
+ name = self._name_akc
75
+ elif self._name_xlsx is not None:
76
+ name = self._name_xlsx
77
+ else:
78
+ name = self._name_stanext
79
+ print('----------------------------------------------------')
80
+ print('----- dog breed: ' + name )
81
+ print('----------------------------------------------------')
82
+ print('[names]')
83
+ print(self.get_names())
84
+ print('[datasets]')
85
+ print(self.get_datasets())
86
+ # see https://stackoverflow.com/questions/9058305/getting-attributes-of-a-class
87
+ print('[instance attributes]')
88
+ for attribute, value in self.__dict__.items():
89
+ print(attribute, '=', value)
90
+ def use_dict_to_save_class_instance(self):
91
+ my_dict = {}
92
+ for attribute, value in self.__dict__.items():
93
+ my_dict[attribute] = value
94
+ return my_dict
95
+ def use_dict_to_load_class_instance(self, my_dict):
96
+ for attribute, value in my_dict.items():
97
+ setattr(self, attribute, value)
98
+ return
99
+
100
+ # ----------------------------------------------------------------------------------------------------------------- #
101
+ def get_name_list_from_summary(summary):
102
+ name_from_abbrev_dict = {}
103
+ for breed in summary.values():
104
+ abbrev = breed._abbrev
105
+ all_names = breed.get_names()
106
+ name_from_abbrev_dict[abbrev] = list(all_names)
107
+ return name_from_abbrev_dict
108
+ def get_partial_summary(summary, part):
109
+ assert part in ['xlsx', 'akc', 'stanext']
110
+ partial_summary = {}
111
+ for key, value in summary.items():
112
+ if (part == 'xlsx' and value._name_xlsx is not None) \
113
+ or (part == 'akc' and value._name_akc is not None) \
114
+ or (part == 'stanext' and value._name_stanext is not None):
115
+ partial_summary[key] = value
116
+ return partial_summary
117
+ def get_akc_but_not_stanext_partial_summary(summary):
118
+ partial_summary = {}
119
+ for key, value in summary.items():
120
+ if value._name_akc is not None:
121
+ if value._name_stanext is None:
122
+ partial_summary[key] = value
123
+ return partial_summary
124
+
125
+ # ----------------------------------------------------------------------------------------------------------------- #
126
+ def main_load_dog_breed_classes(path_complete_abbrev_dict_v1, path_complete_summary_breeds_v1):
127
+ with open(path_complete_abbrev_dict_v1, 'rb') as file:
128
+ complete_abbrev_dict = pkl.load(file)
129
+ with open(path_complete_summary_breeds_v1, 'rb') as file:
130
+ complete_summary_breeds_attributes_only = pkl.load(file)
131
+
132
+ complete_summary_breeds = {}
133
+ for key, value in complete_summary_breeds_attributes_only.items():
134
+ attributes_only = complete_summary_breeds_attributes_only[key]
135
+ complete_summary_breeds[key] = DogBreed(abbrev=attributes_only['_abbrev'])
136
+ complete_summary_breeds[key].use_dict_to_load_class_instance(attributes_only)
137
+ return complete_abbrev_dict, complete_summary_breeds
138
+
139
+
140
+ # ----------------------------------------------------------------------------------------------------------------- #
141
+ def load_similarity_matrix_raw(xlsx_path):
142
+ # --- LOAD EXCEL FILE FROM DOG BREED PAPER
143
+ xlsx = pd.read_excel(xlsx_path)
144
+ # create an array
145
+ abbrev_indices = {}
146
+ matrix_raw = np.zeros((168, 168))
147
+ for ind in range(1, 169):
148
+ abbrev = xlsx[xlsx.columns[2]][ind]
149
+ abbrev_indices[abbrev] = ind-1
150
+ for ind_col in range(0, 168):
151
+ for ind_row in range(0, 168):
152
+ matrix_raw[ind_col, ind_row] = float(xlsx[xlsx.columns[3+ind_col]][1+ind_row])
153
+ return matrix_raw, abbrev_indices
154
+
155
+
156
+
157
+ # ----------------------------------------------------------------------------------------------------------------- #
158
+ # ----------------------------------------------------------------------------------------------------------------- #
159
+ # load the (in advance created) final dict of dog breed classes
160
+ ROOT_PATH_BREED_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..', 'data', 'breed_data')
161
+ path_complete_abbrev_dict_v1 = os.path.join(ROOT_PATH_BREED_DATA, 'complete_abbrev_dict_v2.pkl')
162
+ path_complete_summary_breeds_v1 = os.path.join(ROOT_PATH_BREED_DATA, 'complete_summary_breeds_v2.pkl')
163
+ COMPLETE_ABBREV_DICT, COMPLETE_SUMMARY_BREEDS = main_load_dog_breed_classes(path_complete_abbrev_dict_v1, path_complete_summary_breeds_v1)
164
+ # load similarity matrix, data from:
165
+ # Parker H. G., Dreger D. L., Rimbault M., Davis B. W., Mullen A. B., Carpintero-Ramirez G., and Ostrander E. A.
166
+ # Genomic analyses reveal the influence of geographic origin, migration, and hybridization on modern dog breed
167
+ # development. Cell Reports, 19(4):697–708, 2017.
168
+ xlsx_path = os.path.join(ROOT_PATH_BREED_DATA, 'NIHMS866262-supplement-2.xlsx')
169
+ SIM_MATRIX_RAW, SIM_ABBREV_INDICES = load_similarity_matrix_raw(xlsx_path)
170
+
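
A sketch of how the structures loaded above can be queried (requires pandas and the files under data/breed_data); the two abbreviations passed to the helper are hypothetical and must be keys of SIM_ABBREV_INDICES:

    from configs.dog_breeds.dog_breed_class import \
        COMPLETE_SUMMARY_BREEDS, SIM_MATRIX_RAW, SIM_ABBREV_INDICES

    def breed_similarity(abbrev_a, abbrev_b):
        # raw genomic similarity from the Parker et al. matrix
        return SIM_MATRIX_RAW[SIM_ABBREV_INDICES[abbrev_a],
                              SIM_ABBREV_INDICES[abbrev_b]]

    some_breed = next(iter(COMPLETE_SUMMARY_BREEDS.values()))
    some_breed.print_overview()
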
src/lifting_to_3d/inn_model_for_shape.py ADDED
@@ -0,0 +1,61 @@
1
+
2
+
3
+ from torch import distributions
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch.nn.functional as F
7
+ from torch.utils.data import DataLoader
8
+ from torch.distributions import Normal
9
+ import numpy as np
10
+ import cv2
11
+ import trimesh
12
+ from tqdm import tqdm
13
+ import warnings
14
+ warnings.filterwarnings("ignore", category=DeprecationWarning)
15
+ import FrEIA.framework as Ff
16
+ import FrEIA.modules as Fm
17
+
18
+
19
+ class INNForShape(nn.Module):
20
+ def __init__(self, n_betas, n_betas_limbs, k_tot=2, betas_scale=1.0, betas_limbs_scale=0.1):
21
+ super(INNForShape, self).__init__()
22
+ self.n_betas = n_betas
23
+ self.n_betas_limbs = n_betas_limbs
24
+ self.n_dim = n_betas + n_betas_limbs
25
+ self.betas_scale = betas_scale
26
+ self.betas_limbs_scale = betas_limbs_scale
27
+ self.k_tot = k_tot  # use the constructor argument (was hard-coded to 2)
28
+ self.model_inn = self.build_inn_network(self.n_dim, k_tot=self.k_tot)
29
+
30
+ def subnet_fc(self, c_in, c_out):
31
+ subnet = nn.Sequential(nn.Linear(c_in, 64), nn.ReLU(),
32
+ nn.Linear(64, 64), nn.ReLU(),
33
+ nn.Linear(64, c_out))
34
+ return subnet
35
+
36
+ def build_inn_network(self, n_input, k_tot=12, verbose=False):
37
+ coupling_block = Fm.RNVPCouplingBlock
38
+ nodes = [Ff.InputNode(n_input, name='input')]
39
+ for k in range(k_tot):
40
+ nodes.append(Ff.Node(nodes[-1],
41
+ coupling_block,
42
+ {'subnet_constructor':self.subnet_fc, 'clamp':2.0},
43
+ name=F'coupling_{k}'))
44
+ nodes.append(Ff.Node(nodes[-1],
45
+ Fm.PermuteRandom,
46
+ {'seed':k},
47
+ name=F'permute_{k}'))
48
+ nodes.append(Ff.OutputNode(nodes[-1], name='output'))
49
+ model = Ff.ReversibleGraphNet(nodes, verbose=verbose)
50
+ return model
51
+
52
+ def forward(self, latent_rep):
53
+ shape, _ = self.model_inn(latent_rep, rev=False, jac=False)
54
+ betas = shape[:, :self.n_betas]*self.betas_scale
55
+ betas_limbs = shape[:, self.n_betas:]*self.betas_limbs_scale
56
+ return betas, betas_limbs
57
+
58
+ def reverse(self, betas, betas_limbs):
59
+ shape = torch.cat((betas/self.betas_scale, betas_limbs/self.betas_limbs_scale), dim=1)
60
+ latent_rep, _ = self.model_inn(shape, rev=True, jac=False)
61
+ return latent_rep
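
Because the network is an invertible flow, reverse() recovers the latent vector passed through forward() up to numerical error. A minimal check (requires the FrEIA package; the dimensions are chosen to match the 'smal'/'limbs7' setup above):

    import torch
    from lifting_to_3d.inn_model_for_shape import INNForShape

    model = INNForShape(n_betas=30, n_betas_limbs=7).eval()
    z = torch.randn(4, 37)                        # n_betas + n_betas_limbs
    betas, betas_limbs = model(z)
    z_rec = model.reverse(betas, betas_limbs)
    print(torch.allclose(z, z_rec, atol=1e-4))    # expected: True
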
src/lifting_to_3d/linear_model.py ADDED
@@ -0,0 +1,297 @@
1
+ #!/usr/bin/env python
2
+ # -*- coding: utf-8 -*-
3
+
4
+ # some code from https://raw.githubusercontent.com/weigq/3d_pose_baseline_pytorch/master/src/model.py
5
+
6
+
7
+ from __future__ import absolute_import
8
+ from __future__ import print_function
9
+ import torch
10
+ import torch.nn as nn
11
+
12
+ import os
13
+ import sys
14
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
15
+ # from priors.vae_pose_model.vae_model import VAEmodel
16
+ from priors.normalizing_flow_prior.normalizing_flow_prior import NormalizingFlowPrior
17
+
18
+
19
+ def weight_init_dangerous(m):
20
+ # this is dangerous as it may overwrite the normalizing flow weights
21
+ if isinstance(m, nn.Linear):
22
+ nn.init.kaiming_normal_(m.weight)  # in-place variant; nn.init.kaiming_normal is deprecated
23
+
24
+
25
+ class Linear(nn.Module):
26
+ def __init__(self, linear_size, p_dropout=0.5):
27
+ super(Linear, self).__init__()
28
+ self.l_size = linear_size
29
+
30
+ self.relu = nn.ReLU(inplace=True)
31
+ self.dropout = nn.Dropout(p_dropout)
32
+
33
+ self.w1 = nn.Linear(self.l_size, self.l_size)
34
+ self.batch_norm1 = nn.BatchNorm1d(self.l_size)
35
+
36
+ self.w2 = nn.Linear(self.l_size, self.l_size)
37
+ self.batch_norm2 = nn.BatchNorm1d(self.l_size)
38
+
39
+ def forward(self, x):
40
+ y = self.w1(x)
41
+ y = self.batch_norm1(y)
42
+ y = self.relu(y)
43
+ y = self.dropout(y)
44
+ y = self.w2(y)
45
+ y = self.batch_norm2(y)
46
+ y = self.relu(y)
47
+ y = self.dropout(y)
48
+ out = x + y
49
+ return out
50
+
51
+
52
+ class LinearModel(nn.Module):
53
+ def __init__(self,
54
+ linear_size=1024,
55
+ num_stage=2,
56
+ p_dropout=0.5,
57
+ input_size=16*2,
58
+ output_size=16*3):
59
+ super(LinearModel, self).__init__()
60
+ self.linear_size = linear_size
61
+ self.p_dropout = p_dropout
62
+ self.num_stage = num_stage
63
+ # input
64
+ self.input_size = input_size # 2d joints: 16 * 2
65
+ # output
66
+ self.output_size = output_size # 3d joints: 16 * 3
67
+ # process input to linear size
68
+ self.w1 = nn.Linear(self.input_size, self.linear_size)
69
+ self.batch_norm1 = nn.BatchNorm1d(self.linear_size)
70
+ self.linear_stages = []
71
+ for l in range(num_stage):
72
+ self.linear_stages.append(Linear(self.linear_size, self.p_dropout))
73
+ self.linear_stages = nn.ModuleList(self.linear_stages)
74
+ # post-processing
75
+ self.w2 = nn.Linear(self.linear_size, self.output_size)
76
+ # helpers (relu and dropout)
77
+ self.relu = nn.ReLU(inplace=True)
78
+ self.dropout = nn.Dropout(self.p_dropout)
79
+
80
+ def forward(self, x):
81
+ # pre-processing
82
+ y = self.w1(x)
83
+ y = self.batch_norm1(y)
84
+ y = self.relu(y)
85
+ y = self.dropout(y)
86
+ # linear layers
87
+ for i in range(self.num_stage):
88
+ y = self.linear_stages[i](y)
89
+ # post-processing
90
+ y = self.w2(y)
91
+ return y
92
+
93
+
94
+ class LinearModelComplete(nn.Module):
95
+ def __init__(self,
96
+ linear_size=1024,
97
+ num_stage_comb=2,
98
+ num_stage_heads=1,
99
+ num_stage_heads_pose=1,
100
+ trans_sep=False,
101
+ p_dropout=0.5,
102
+ input_size=16*2,
103
+ intermediate_size=1024,
104
+ output_info=None,
105
+ n_joints=25,
106
+ n_z=512,
107
+ add_z_to_3d_input=False,
108
+ n_segbps=64*2,
109
+ add_segbps_to_3d_input=False,
110
+ structure_pose_net='default',
111
+ fix_vae_weights=True,
112
+ nf_version=None): # 0): n_silh_enc
113
+ super(LinearModelComplete, self).__init__()
114
+ if add_z_to_3d_input:
115
+ self.n_z_to_add = n_z # 512
116
+ else:
117
+ self.n_z_to_add = 0
118
+ if add_segbps_to_3d_input:
119
+ self.n_segbps_to_add = n_segbps # 64
120
+ else:
121
+ self.n_segbps_to_add = 0
122
+ self.input_size = input_size
123
+ self.linear_size = linear_size
124
+ self.p_dropout = p_dropout
125
+ self.num_stage_comb = num_stage_comb
126
+ self.num_stage_heads = num_stage_heads
127
+ self.num_stage_heads_pose = num_stage_heads_pose
128
+ self.trans_sep = trans_sep
129
+ self.input_size = input_size
130
+ self.intermediate_size = intermediate_size
131
+ self.structure_pose_net = structure_pose_net
132
+ self.fix_vae_weights = fix_vae_weights # only relevant if structure_pose_net='vae'
133
+ self.nf_version = nf_version
134
+ if output_info is None:
135
+ pose = {'name': 'pose', 'n': n_joints*6, 'out_shape':[n_joints, 6]}
136
+ cam = {'name': 'flength', 'n': 1}
137
+ if self.trans_sep:
138
+ translation_xy = {'name': 'trans_xy', 'n': 2}
139
+ translation_z = {'name': 'trans_z', 'n': 1}
140
+ self.output_info = [pose, translation_xy, translation_z, cam]
141
+ else:
142
+ translation = {'name': 'trans', 'n': 3}
143
+ self.output_info = [pose, translation, cam]
144
+ if self.structure_pose_net == 'vae' or self.structure_pose_net == 'normflow':
145
+ global_pose = {'name': 'global_pose', 'n': 1*6, 'out_shape':[1, 6]}
146
+ self.output_info.append(global_pose)
147
+ else:
148
+ self.output_info = output_info
149
+ self.linear_combined = LinearModel(linear_size=self.linear_size,
150
+ num_stage=self.num_stage_comb,
151
+ p_dropout=p_dropout,
152
+ input_size=self.input_size + self.n_segbps_to_add + self.n_z_to_add, ######
153
+ output_size=self.intermediate_size)
154
+ self.output_info_linear_models = []
155
+ for ind_el, element in enumerate(self.output_info):
156
+ if element['name'] == 'pose':
157
+ num_stage = self.num_stage_heads_pose
158
+ if self.structure_pose_net == 'default':
159
+ output_size_pose_lin = element['n']
160
+ elif self.structure_pose_net == 'vae':
161
+ # load vae decoder (note: the VAEmodel import at the top of this file is commented out, so this branch will fail unless it is restored)
162
+ self.pose_vae_model = VAEmodel()
163
+ self.pose_vae_model.initialize_with_pretrained_weights()
164
+ # define the input size of the vae decoder
165
+ output_size_pose_lin = self.pose_vae_model.latent_size
166
+ elif self.structure_pose_net == 'normflow':
167
+ # the following will automatically be initialized
168
+ self.pose_normflow_model = NormalizingFlowPrior(nf_version=self.nf_version)
169
+ output_size_pose_lin = element['n'] - 6 # no global rotation
170
+ else:
171
+ raise NotImplementedError
172
+ self.output_info_linear_models.append(LinearModel(linear_size=self.linear_size,
173
+ num_stage=num_stage,
174
+ p_dropout=p_dropout,
175
+ input_size=self.intermediate_size,
176
+ output_size=output_size_pose_lin))
177
+ else:
178
+ if element['name'] == 'global_pose':
179
+ num_stage = self.num_stage_heads_pose
180
+ else:
181
+ num_stage = self.num_stage_heads
182
+ self.output_info_linear_models.append(LinearModel(linear_size=self.linear_size,
183
+ num_stage=num_stage,
184
+ p_dropout=p_dropout,
185
+ input_size=self.intermediate_size,
186
+ output_size=element['n']))
187
+ element['linear_model_index'] = ind_el
188
+ self.output_info_linear_models = nn.ModuleList(self.output_info_linear_models)
189
+
190
+ def forward(self, x):
191
+ device = x.device
192
+ # combined stage
193
+ if x.shape[1] == self.input_size + self.n_segbps_to_add + self.n_z_to_add:
194
+ y = self.linear_combined(x)
195
+ elif x.shape[1] == self.input_size + self.n_segbps_to_add:
196
+ x_mod = torch.cat((x, torch.normal(0, 1, size=(x.shape[0], self.n_z_to_add)).to(device)), dim=1)
197
+ y = self.linear_combined(x_mod)
198
+ else:
199
+ print(x.shape)
200
+ print(self.input_size)
201
+ print(self.n_segbps_to_add)
202
+ print(self.n_z_to_add)
203
+ raise ValueError
204
+ # heads
205
+ results = {}
206
+ results_trans = {}
207
+ for element in self.output_info:
208
+ linear_model = self.output_info_linear_models[element['linear_model_index']]
209
+ if element['name'] == 'pose':
210
+ if self.structure_pose_net == 'default':
211
+ results['pose'] = (linear_model(y)).reshape((-1, element['out_shape'][0], element['out_shape'][1]))
212
+ normflow_z = None
213
+ elif self.structure_pose_net == 'vae':
214
+ res_lin = linear_model(y)
215
+ if self.fix_vae_weights:
216
+ self.pose_vae_model.requires_grad_(False) # let gradients flow through but don't update the parameters
217
+ res_vae = self.pose_vae_model.inference(feat=res_lin)
218
+ self.pose_vae_model.requires_grad_(True)
219
+ else:
220
+ res_vae = self.pose_vae_model.inference(feat=res_lin)
221
+ res_pose_not_glob = res_vae.reshape((-1, element['out_shape'][0], element['out_shape'][1]))
222
+ normflow_z = None
223
+ elif self.structure_pose_net == 'normflow':
224
+ normflow_z = linear_model(y)*0.1
225
+ self.pose_normflow_model.requires_grad_(False) # let gradients flow through but don't update the parameters
226
+ res_pose_not_glob = self.pose_normflow_model.run_backwards(z=normflow_z).reshape((-1, element['out_shape'][0]-1, element['out_shape'][1]))
227
+ else:
228
+ raise NotImplementedError
229
+ elif element['name'] == 'global_pose':
230
+ res_pose_glob = (linear_model(y)).reshape((-1, element['out_shape'][0], element['out_shape'][1]))
231
+ elif element['name'] == 'trans_xy' or element['name'] == 'trans_z':
232
+ results_trans[element['name']] = linear_model(y)
233
+ else:
234
+ results[element['name']] = linear_model(y)
235
+ if self.trans_sep:
236
+ results['trans'] = torch.cat((results_trans['trans_xy'], results_trans['trans_z']), dim=1)
237
+ # prepare pose including global rotation
238
+ if self.structure_pose_net == 'vae':
239
+ # results['pose'] = torch.cat((res_pose_glob, res_pose_not_glob), dim=1)
240
+ results['pose'] = torch.cat((res_pose_glob, res_pose_not_glob[:, 1:, :]), dim=1)
241
+ elif self.structure_pose_net == 'normflow':
242
+ results['pose'] = torch.cat((res_pose_glob, res_pose_not_glob[:, :, :]), dim=1)
243
+ # return a dictionary which contains all results
244
+ results['normflow_z'] = normflow_z
245
+ return results # this is a dictionary
246
+
247
+
248
+
249
+
250
+
251
+ # ------------------------------------------
252
+ # for pretraining of the 3d model only:
253
+ # (see combined_model/model_shape_v2.py)
254
+
255
+ class Wrapper_LinearModelComplete(nn.Module):
256
+ def __init__(self,
257
+ linear_size=1024,
258
+ num_stage_comb=2,
259
+ num_stage_heads=1,
260
+ num_stage_heads_pose=1,
261
+ trans_sep=False,
262
+ p_dropout=0.5,
263
+ input_size=16*2,
264
+ intermediate_size=1024,
265
+ output_info=None,
266
+ n_joints=25,
267
+ n_z=512,
268
+ add_z_to_3d_input=False,
269
+ n_segbps=64*2,
270
+ add_segbps_to_3d_input=False,
271
+ structure_pose_net='default',
272
+ fix_vae_weights=True,
273
+ nf_version=None):
274
+ self.add_segbps_to_3d_input = add_segbps_to_3d_input
275
+ super(Wrapper_LinearModelComplete, self).__init__()
276
+ self.model_3d = LinearModelComplete(linear_size=linear_size,
277
+ num_stage_comb=num_stage_comb,
278
+ num_stage_heads=num_stage_heads,
279
+ num_stage_heads_pose=num_stage_heads_pose,
280
+ trans_sep=trans_sep,
281
+ p_dropout=p_dropout, # 0.5,
282
+ input_size=input_size,
283
+ intermediate_size=intermediate_size,
284
+ output_info=output_info,
285
+ n_joints=n_joints,
286
+ n_z=n_z,
287
+ add_z_to_3d_input=add_z_to_3d_input,
288
+ n_segbps=n_segbps,
289
+ add_segbps_to_3d_input=add_segbps_to_3d_input,
290
+ structure_pose_net=structure_pose_net,
291
+ fix_vae_weights=fix_vae_weights,
292
+ nf_version=nf_version)
293
+ def forward(self, input_vec):
294
+ # input_vec = torch.cat((keypoints_prepared.reshape((batch_size, -1)), bone_lengths_prepared), axis=1)
295
+ # predict 3d parameters (those are normalized, we need to correct mean and std in a next step)
296
+ output = self.model_3d(input_vec)
297
+ return output
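
A dummy forward pass through the residual MLP building block above; the sizes are arbitrary examples (24 input keypoints with 2D coordinates, 35 joints in 6D rotation output):

    import torch
    from lifting_to_3d.linear_model import LinearModel

    net = LinearModel(linear_size=1024, num_stage=2, p_dropout=0.5,
                      input_size=24*2, output_size=35*6).eval()
    x = torch.randn(8, 24*2)
    print(net(x).shape)    # torch.Size([8, 210])
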
src/lifting_to_3d/utils/geometry_utils.py ADDED
@@ -0,0 +1,236 @@
1
+
2
+ import torch
3
+ from torch.nn import functional as F
4
+ import numpy as np
5
+ from torch import nn
6
+
7
+
8
+ def geodesic_loss(R, Rgt):
9
+ # see: Silvia tiger pose model 3d code
10
+ num_joints = R.shape[1]
11
+ RT = R.permute(0,1,3,2)
12
+ A = torch.matmul(RT.view(-1,3,3),Rgt.view(-1,3,3))
13
+ # torch.trace works only for 2D tensors
14
+ n = A.shape[0]
15
+ po_loss = 0
16
+ eps = 1e-7
17
+ T = torch.sum(A[:, torch.eye(3, dtype=torch.bool, device=A.device)], 1)  # batched trace; build the mask on A's device
18
+ theta = torch.clamp(0.5*(T-1), -1+eps, 1-eps)
19
+ angles = torch.acos(theta)
20
+ loss = torch.sum(angles)/(n*num_joints)
21
+ return loss
22
+
23
+ class geodesic_loss_R(nn.Module):
24
+ def __init__(self,reduction='mean'):
25
+ super(geodesic_loss_R, self).__init__()
26
+ self.reduction = reduction
27
+ self.eps = 1e-6
28
+
29
+ # batch geodesic loss for rotation matrices
30
+ def bgdR(self,bRgts,bRps):
31
+ #return((bRgts - bRps)**2.).mean()
32
+ return geodesic_loss(bRgts, bRps)
33
+
34
+ def forward(self, ypred, ytrue):
35
+ theta = geodesic_loss(ypred,ytrue)
36
+ if self.reduction == 'mean':
37
+ return torch.mean(theta)
38
+ else:
39
+ return theta
40
+
41
+ def batch_rodrigues_numpy(theta):
42
+ """ Code adapted from spin
43
+ Convert axis-angle representation to rotation matrix.
44
+ Remark:
45
+ this leads to the same result as kornia.angle_axis_to_rotation_matrix(theta)
46
+ Args:
47
+ theta: size = [B, 3]
48
+ Returns:
49
+ Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]
50
+ """
51
+ l1norm = np.linalg.norm(theta + 1e-8, ord = 2, axis = 1)
52
+ # angle = np.unsqueeze(l1norm, -1)
53
+ angle = l1norm.reshape((-1, 1))
54
+ # normalized = np.div(theta, angle)
55
+ normalized = theta / angle
56
+ angle = angle * 0.5
57
+ v_cos = np.cos(angle)
58
+ v_sin = np.sin(angle)
59
+ # quat = np.cat([v_cos, v_sin * normalized], dim = 1)
60
+ quat = np.concatenate([v_cos, v_sin * normalized], axis = 1)
61
+ return quat_to_rotmat_numpy(quat)
62
+
63
+ def quat_to_rotmat_numpy(quat):
64
+ """Code from: https://github.com/nkolot/SPIN/blob/master/utils/geometry.py
65
+ Convert quaternion coefficients to rotation matrix.
66
+ Args:
67
+ quat: size = [B, 4] 4 <===>(w, x, y, z)
68
+ Returns:
69
+ Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]
70
+ """
71
+ norm_quat = quat
72
+ # norm_quat = norm_quat/norm_quat.norm(p=2, dim=1, keepdim=True)
73
+ norm_quat = norm_quat/np.linalg.norm(norm_quat, ord=2, axis=1, keepdims=True)
74
+ w, x, y, z = norm_quat[:,0], norm_quat[:,1], norm_quat[:,2], norm_quat[:,3]
75
+ B = quat.shape[0]
76
+ # w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
77
+ w2, x2, y2, z2 = w**2, x**2, y**2, z**2
78
+ wx, wy, wz = w*x, w*y, w*z
79
+ xy, xz, yz = x*y, x*z, y*z
80
+ rotMat = np.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,
81
+ 2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,
82
+ 2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], axis=1).reshape(B, 3, 3)
83
+ return rotMat
84
+
85
+
86
+ def batch_rodrigues(theta):
87
+ """Code from: https://github.com/nkolot/SPIN/blob/master/utils/geometry.py
88
+ Convert axis-angle representation to rotation matrix.
89
+ Remark:
90
+ this leads to the same result as kornia.angle_axis_to_rotation_matrix(theta)
91
+ Args:
92
+ theta: size = [B, 3]
93
+ Returns:
94
+ Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]
95
+ """
96
+ l1norm = torch.norm(theta + 1e-8, p = 2, dim = 1)
97
+ angle = torch.unsqueeze(l1norm, -1)
98
+ normalized = torch.div(theta, angle)
99
+ angle = angle * 0.5
100
+ v_cos = torch.cos(angle)
101
+ v_sin = torch.sin(angle)
102
+ quat = torch.cat([v_cos, v_sin * normalized], dim = 1)
103
+ return quat_to_rotmat(quat)
104
+
105
+ def batch_rot2aa(Rs, epsilon=1e-7):
106
+ """ Code from: https://github.com/vchoutas/expose/blob/dffc38d62ad3817481d15fe509a93c2bb606cb8b/expose/utils/rotation_utils.py#L55
107
+ Rs is B x 3 x 3
108
+ void cMathUtil::RotMatToAxisAngle(const tMatrix& mat, tVector& out_axis,
109
+ double& out_theta)
110
+ {
111
+ double c = 0.5 * (mat(0, 0) + mat(1, 1) + mat(2, 2) - 1);
112
+ c = cMathUtil::Clamp(c, -1.0, 1.0);
113
+ out_theta = std::acos(c);
114
+ if (std::abs(out_theta) < 0.00001)
115
+ {
116
+ out_axis = tVector(0, 0, 1, 0);
117
+ }
118
+ else
119
+ {
120
+ double m21 = mat(2, 1) - mat(1, 2);
121
+ double m02 = mat(0, 2) - mat(2, 0);
122
+ double m10 = mat(1, 0) - mat(0, 1);
123
+ double denom = std::sqrt(m21 * m21 + m02 * m02 + m10 * m10);
124
+ out_axis[0] = m21 / denom;
125
+ out_axis[1] = m02 / denom;
126
+ out_axis[2] = m10 / denom;
127
+ out_axis[3] = 0;
128
+ }
129
+ }
130
+ """
131
+ cos = 0.5 * (torch.einsum('bii->b', [Rs]) - 1)
132
+ cos = torch.clamp(cos, -1 + epsilon, 1 - epsilon)
133
+ theta = torch.acos(cos)
134
+ m21 = Rs[:, 2, 1] - Rs[:, 1, 2]
135
+ m02 = Rs[:, 0, 2] - Rs[:, 2, 0]
136
+ m10 = Rs[:, 1, 0] - Rs[:, 0, 1]
137
+ denom = torch.sqrt(m21 * m21 + m02 * m02 + m10 * m10 + epsilon)
138
+ axis0 = torch.where(torch.abs(theta) < 0.00001, m21, m21 / denom)
139
+ axis1 = torch.where(torch.abs(theta) < 0.00001, m02, m02 / denom)
140
+ axis2 = torch.where(torch.abs(theta) < 0.00001, m10, m10 / denom)
141
+ return theta.unsqueeze(1) * torch.stack([axis0, axis1, axis2], 1)
142
+
143
+ def quat_to_rotmat(quat):
144
+ """Code from: https://github.com/nkolot/SPIN/blob/master/utils/geometry.py
145
+ Convert quaternion coefficients to rotation matrix.
146
+ Args:
147
+ quat: size = [B, 4] 4 <===>(w, x, y, z)
148
+ Returns:
149
+ Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]
150
+ """
151
+ norm_quat = quat
152
+ norm_quat = norm_quat/norm_quat.norm(p=2, dim=1, keepdim=True)
153
+ w, x, y, z = norm_quat[:,0], norm_quat[:,1], norm_quat[:,2], norm_quat[:,3]
154
+
155
+ B = quat.size(0)
156
+
157
+ w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
158
+ wx, wy, wz = w*x, w*y, w*z
159
+ xy, xz, yz = x*y, x*z, y*z
160
+
161
+ rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,
162
+ 2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,
163
+ 2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)
164
+ return rotMat
165
+
166
+ def rot6d_to_rotmat(rot6d):
167
+ """ Code from: https://github.com/nkolot/SPIN/blob/master/utils/geometry.py
168
+ Convert 6D rotation representation to 3x3 rotation matrix.
169
+ Based on Zhou et al., "On the Continuity of Rotation Representations in Neural Networks", CVPR 2019
170
+ Input:
171
+ (B,6) Batch of 6-D rotation representations
172
+ Output:
173
+ (B,3,3) Batch of corresponding rotation matrices
174
+ """
175
+ rot6d = rot6d.view(-1,3,2)
176
+ a1 = rot6d[:, :, 0]
177
+ a2 = rot6d[:, :, 1]
178
+ b1 = F.normalize(a1)
179
+ b2 = F.normalize(a2 - torch.einsum('bi,bi->b', b1, a2).unsqueeze(-1) * b1)
180
+ b3 = torch.cross(b1, b2)
181
+ rotmat = torch.stack((b1, b2, b3), dim=-1)
182
+ return rotmat
183
+
184
+ def rotmat_to_rot6d(rotmat):
185
+ """ Convert 3x3 rotation matrix to 6D rotation representation.
186
+ Based on Zhou et al., "On the Continuity of Rotation Representations in Neural Networks", CVPR 2019
187
+ Input:
188
+ (B,3,3) Batch of corresponding rotation matrices
189
+ Output:
190
+ (B,6) Batch of 6-D rotation representations
191
+ """
192
+ rot6d = rotmat[:, :, :2].reshape((-1, 6))
193
+ return rot6d
194
+
195
+
196
+ def main():
197
+ # rotation matrix and 6d representation
198
+ # see "On the Continuity of Rotation Representations in Neural Networks"
199
+ from pyquaternion import Quaternion
200
+ batch_size = 5
201
+ rotmat = np.zeros((batch_size, 3, 3))
202
+ for ind in range(0, batch_size):
203
+ rotmat[ind, :, :] = Quaternion.random().rotation_matrix
204
+ rotmat_torch = torch.Tensor(rotmat)
205
+ rot6d = rotmat_to_rot6d(rotmat_torch)
206
+ rotmat_rec = rot6d_to_rotmat(rot6d)
207
+ print('..................... 1 ....................')
208
+ print(rotmat_torch[0, :, :])
209
+ print(rotmat_rec[0, :, :])
210
+ print('Conversion from rotmat to rot6d and inverse are ok!')
211
+ # rotation matrix and axis angle representation
212
+ import kornia
213
+ input = torch.rand(1, 3)
214
+ output = kornia.angle_axis_to_rotation_matrix(input)
215
+ input_rec = kornia.rotation_matrix_to_angle_axis(output)
216
+ print('..................... 2 ....................')
217
+ print(input)
218
+ print(input_rec)
219
+ print('Kornia implementation for rotation_matrix_to_angle_axis is wrong!!!!')
220
+ # For non-differentiable conversions use scipy:
221
+ # https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.html
222
+ from scipy.spatial.transform import Rotation as R
223
+ r = R.from_matrix(rotmat[0, :, :])
224
+ print('..................... 3 ....................')
225
+ print(r.as_matrix())
226
+ print(r.as_rotvec())
227
+ print(r.as_quat())  # scipy's Rotation exposes as_quat(), not as_quaternion
228
+ # one might furthermore have a look at:
229
+ # https://github.com/silviazuffi/smalst/blob/master/utils/transformations.py
230
+
231
+
232
+
233
+ if __name__ == "__main__":
234
+ main()
235
+
236
+
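
A small sanity check for geodesic_loss, complementing the round-trip tests in main() above: identical rotations give a loss near zero, and a rotation about the z-axis gives back its angle:

    import numpy as np
    import torch
    from lifting_to_3d.utils.geometry_utils import geodesic_loss

    a = 0.3
    Rz = torch.tensor([[np.cos(a), -np.sin(a), 0.],
                       [np.sin(a),  np.cos(a), 0.],
                       [0., 0., 1.]], dtype=torch.float32)
    I3 = torch.eye(3)
    print(geodesic_loss(Rz[None, None], I3[None, None]))  # ~0.3
    print(geodesic_loss(I3[None, None], I3[None, None]))  # ~0 (clamping keeps it slightly above zero)
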
src/metrics/metrics.py ADDED
@@ -0,0 +1,74 @@
1
+ # code from: https://github.com/benjiebob/WLDO/blob/master/wldo_regressor/metrics.py
2
+
3
+
4
+ import torch
5
+ import torch.nn.functional as F
6
+ import numpy as np
7
+
8
+ IMG_RES = 256 # in WLDO it is 224
9
+
10
+ class Metrics():
11
+
12
+ @staticmethod
13
+ def PCK_thresh(
14
+ pred_keypoints, gt_keypoints,
15
+ gtseg, has_seg,
16
+ thresh, idxs, biggs=False):
17
+
18
+ pred_keypoints, gt_keypoints, gtseg = pred_keypoints[has_seg], gt_keypoints[has_seg], gtseg[has_seg]
19
+
20
+ if idxs is None:
21
+ idxs = list(range(pred_keypoints.shape[1]))
22
+
23
+ idxs = np.array(idxs).astype(int)
24
+
25
+ pred_keypoints = pred_keypoints[:, idxs]
26
+ gt_keypoints = gt_keypoints[:, idxs]
27
+
28
+ if biggs:
29
+ keypoints_gt = ((gt_keypoints + 1.0) * 0.5) * IMG_RES
30
+ dist = torch.norm(pred_keypoints - keypoints_gt[:, :, [1, 0]], dim = -1)
31
+ else:
32
+ keypoints_gt = gt_keypoints # (0 to IMG_SIZE)
33
+ dist = torch.norm(pred_keypoints - keypoints_gt[:, :, :2], dim = -1)
34
+
35
+ seg_area = torch.sum(gtseg.reshape(gtseg.shape[0], -1), dim = -1).unsqueeze(-1)
36
+
37
+ hits = (dist / torch.sqrt(seg_area)) < thresh
38
+ total_visible = torch.sum(gt_keypoints[:, :, -1], dim = -1)
39
+ pck = torch.sum(hits.float() * gt_keypoints[:, :, -1], dim = -1) / total_visible
40
+
41
+ return pck
42
+
43
+ @staticmethod
44
+ def PCK(
45
+ pred_keypoints, keypoints,
46
+ gtseg, has_seg,
47
+ thresh_range=[0.15],
48
+ idxs:list=None,
49
+ biggs=False):
50
+ """Calc PCK with same method as in eval.
51
+ idxs = optional list of subset of keypoints to index from
52
+ """
53
+ cumulative_pck = []
54
+ for thresh in thresh_range:
55
+ pck = Metrics.PCK_thresh(
56
+ pred_keypoints, keypoints,
57
+ gtseg, has_seg, thresh, idxs,
58
+ biggs=biggs)
59
+ cumulative_pck.append(pck)
60
+ pck_mean = torch.stack(cumulative_pck, dim = 0).mean(dim=0)
61
+ return pck_mean
62
+
63
+ @staticmethod
64
+ def IOU(synth_silhouettes, gt_seg, img_border_mask, mask):
65
+ for i in range(mask.shape[0]):
66
+ synth_silhouettes[i] *= mask[i]
67
+ # Do not penalize parts of the segmentation outside the img range
68
+ gt_seg = (gt_seg * img_border_mask) + synth_silhouettes * (1.0 - img_border_mask)
69
+ intersection = torch.sum((synth_silhouettes * gt_seg).reshape(synth_silhouettes.shape[0], -1), dim = -1)
70
+ union = torch.sum(((synth_silhouettes + gt_seg).reshape(synth_silhouettes.shape[0], -1) > 0.0).float(), dim = -1)
71
+ acc_IOU_SCORE = intersection / union
72
+ if torch.isnan(acc_IOU_SCORE).sum() > 0:
73
+ import pdb; pdb.set_trace()
74
+ return acc_IOU_SCORE
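
A sketch of calling Metrics.PCK with dummy tensors; the shapes follow the code above (keypoints of shape (B, K, 3) with visibility in the last channel, segmentations of shape (B, H, W)):

    import torch
    from metrics.metrics import Metrics

    B, K, S = 2, 20, 256
    pred = torch.rand(B, K, 2) * S
    gt = torch.cat((torch.rand(B, K, 2) * S, torch.ones(B, K, 1)), dim=2)
    gtseg = torch.ones(B, S, S)
    has_seg = torch.tensor([True, True])
    pck = Metrics.PCK(pred, gt, gtseg, has_seg, thresh_range=[0.15])
    print(pck)    # one PCK value per image
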
src/priors/normalizing_flow_prior/normalizing_flow_prior.py ADDED
@@ -0,0 +1,115 @@
1
+
2
+ from torch import distributions
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch.nn.functional as F
6
+ from torch.utils.data import DataLoader
7
+ from torch.distributions import Normal
8
+ import numpy as np
9
+ import cv2
10
+ import trimesh
11
+ from tqdm import tqdm
12
+
13
+ import warnings
14
+ warnings.filterwarnings("ignore", category=DeprecationWarning)
15
+ import FrEIA.framework as Ff
16
+ import FrEIA.modules as Fm
17
+ from configs.barc_cfg_defaults import get_cfg_global_updated
18
+
19
+
20
+ class NormalizingFlowPrior(nn.Module):
21
+ def __init__(self, nf_version=None):
22
+ super(NormalizingFlowPrior, self).__init__()
23
+ # the normalizing flow network takes as input a vector of size (35-1)*6, i.e.
24
+ # [all joints except the root joint]*6. The rotations are encoded in the 6D
25
+ # representation, which is actually not ideal. Nevertheless, in practice the
26
+ # results seem to be fine.
27
+ n_dim = (35 - 1) * 6
28
+ self.param_dict = self.get_version_param_dict(nf_version)
29
+ self.model_inn = self.build_inn_network(n_dim, k_tot=self.param_dict['k_tot'])
30
+ self.initialize_with_pretrained_weights()
31
+
32
+ def get_version_param_dict(self, nf_version):
33
+ # we trained several versions of the normalizing flow pose prior; here we provide only
34
+ # the option that was used for the CVPR 2022 paper (nf_version=3)
35
+ if nf_version == 3:
36
+ param_dict = {
37
+ 'k_tot': 2,
38
+ 'path_pretrained': get_cfg_global_updated().paths.MODELPATH_NORMFLOW,
39
+ 'subnet_fc_type': '3_64'}
40
+ else:
41
+ print(nf_version)
42
+ raise ValueError
43
+ return param_dict
44
+
45
+ def initialize_with_pretrained_weights(self, weight_path=None):
46
+ # The normalizing flow pose prior is pretrained separately. Afterwards all weights
47
+ # are kept fixed. Here we load those pretrained weights.
48
+ if weight_path is None:
49
+ weight_path = self.param_dict['path_pretrained']
50
+ print(' normalizing flow pose prior: loading {}..'.format(weight_path))
51
+ pretrained_dict = torch.load(weight_path)['model_state_dict']
52
+ self.model_inn.load_state_dict(pretrained_dict, strict=True)
53
+
54
+ def subnet_fc(self, c_in, c_out):
55
+ if self.param_dict['subnet_fc_type']=='3_512':
56
+ subnet = nn.Sequential(nn.Linear(c_in, 512), nn.ReLU(),
57
+ nn.Linear(512, 512), nn.ReLU(),
58
+ nn.Linear(512, c_out))
59
+ elif self.param_dict['subnet_fc_type']=='3_64':
60
+ subnet = nn.Sequential(nn.Linear(c_in, 64), nn.ReLU(),
61
+ nn.Linear(64, 64), nn.ReLU(),
62
+ nn.Linear(64, c_out))
63
+ return subnet
64
+
65
+ def build_inn_network(self, n_input, k_tot=12, verbose=False):
66
+ coupling_block = Fm.RNVPCouplingBlock
67
+ nodes = [Ff.InputNode(n_input, name='input')]
68
+ for k in range(k_tot):
69
+ nodes.append(Ff.Node(nodes[-1],
70
+ coupling_block,
71
+ {'subnet_constructor':self.subnet_fc, 'clamp':2.0},
72
+ name=F'coupling_{k}'))
73
+ nodes.append(Ff.Node(nodes[-1],
74
+ Fm.PermuteRandom,
75
+ {'seed':k},
76
+ name=F'permute_{k}'))
77
+ nodes.append(Ff.OutputNode(nodes[-1], name='output'))
78
+ model = Ff.ReversibleGraphNet(nodes, verbose=verbose)
79
+ return model
80
+
81
+ def calculate_loss_from_z(self, z, type='square'):
82
+ assert type in ['square', 'neg_log_prob']
83
+ if type == 'square':
84
+ loss = (z**2).mean() # * 0.00001
85
+ elif type == 'neg_log_prob':
86
+ means = torch.zeros((z.shape[0], z.shape[1]), dtype=z.dtype, device=z.device)
87
+ stds = torch.ones((z.shape[0], z.shape[1]), dtype=z.dtype, device=z.device)
88
+ normal_distribution = Normal(means, stds)
89
+ log_prob = normal_distribution.log_prob(z)
90
+ loss = - log_prob.mean()
91
+ return loss
92
+
93
+ def calculate_loss(self, poses_rot6d, type='square'):
94
+ assert type in ['square', 'neg_log_prob']
95
+ poses_rot6d_noglob = poses_rot6d[:, 1:, :].reshape((-1, 34*6))
96
+ z, _ = self.model_inn(poses_rot6d_noglob, rev=False, jac=False)
97
+ loss = self.calculate_loss_from_z(z, type=type)
98
+ return loss
99
+
100
+ def forward(self, poses_rot6d):
101
+ # from pose to latent pose representation z
102
+ # poses_rot6d has shape (bs, 34, 6)
103
+ poses_rot6d_noglob = poses_rot6d[:, 1:, :].reshape((-1, 34*6))
104
+ z, _ = self.model_inn(poses_rot6d_noglob, rev=False, jac=False)
105
+ return z
106
+
107
+ def run_backwards(self, z):
108
+ # from latent pose representation z to pose
109
+ poses_rot6d_noglob, _ = self.model_inn(z, rev=True, jac=False)
110
+ return poses_rot6d_noglob
111
+
112
+
113
+
114
+
115
+
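
The two options of calculate_loss_from_z differ only by scale and an additive constant. A standalone illustration that runs without the pretrained flow weights:

    import math
    import torch
    from torch.distributions import Normal

    z = torch.randn(8, (35 - 1) * 6)
    loss_square = (z ** 2).mean()
    normal = Normal(torch.zeros_like(z), torch.ones_like(z))
    loss_nll = -normal.log_prob(z).mean()
    # for a standard normal: nll = 0.5 * square + 0.5 * log(2*pi)
    print(loss_nll, 0.5 * loss_square + 0.5 * math.log(2 * math.pi))
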
src/priors/shape_prior.py ADDED
@@ -0,0 +1,40 @@
1
+
2
+ # some parts of the code adapted from https://github.com/benjiebob/WLDO and https://github.com/benjiebob/SMALify
3
+
4
+ import numpy as np
5
+ import torch
6
+ import pickle as pkl
7
+
8
+
9
+
10
+ class ShapePrior(torch.nn.Module):
11
+ def __init__(self, prior_path):
12
+ super(ShapePrior, self).__init__()
13
+ try:
14
+ with open(prior_path, 'r') as f:
15
+ res = pkl.load(f)
16
+ except (UnicodeDecodeError, TypeError) as e:
17
+ with open(prior_path, 'rb') as file:
18
+ u = pkl._Unpickler(file)
19
+ u.encoding = 'latin1'
20
+ res = u.load()
21
+ betas_mean = res['dog_cluster_mean']
22
+ betas_cov = res['dog_cluster_cov']
23
+ single_gaussian_inv_covs = np.linalg.inv(betas_cov + 1e-5 * np.eye(betas_cov.shape[0]))
24
+ single_gaussian_precs = torch.tensor(np.linalg.cholesky(single_gaussian_inv_covs)).float()
25
+ single_gaussian_means = torch.tensor(betas_mean).float()
26
+ self.register_buffer('single_gaussian_precs', single_gaussian_precs) # (20, 20)
27
+ self.register_buffer('single_gaussian_means', single_gaussian_means) # (20)
28
+ use_ind_tch = torch.from_numpy(np.ones(single_gaussian_means.shape[0], dtype=bool)).float() # .to(device)
29
+ self.register_buffer('use_ind_tch', use_ind_tch)
30
+
31
+ def forward(self, betas_smal_orig, use_singe_gaussian=False):
32
+ n_betas_smal = betas_smal_orig.shape[1]
33
+ device = betas_smal_orig.device
34
+ use_ind_tch_corrected = self.use_ind_tch * torch.cat((torch.ones_like(self.use_ind_tch[:n_betas_smal]), torch.zeros_like(self.use_ind_tch[n_betas_smal:])))
35
+ samples = torch.cat((betas_smal_orig, torch.zeros((betas_smal_orig.shape[0], self.single_gaussian_means.shape[0]-n_betas_smal)).float().to(device)), dim=1)
36
+ mean_sub = samples - self.single_gaussian_means.unsqueeze(0)
37
+ single_gaussian_precs_corr = self.single_gaussian_precs * use_ind_tch_corrected[:, None] * use_ind_tch_corrected[None, :]
38
+ res = torch.tensordot(mean_sub, single_gaussian_precs_corr, dims = ([1], [0]))
39
+ res_final_mean_2 = torch.mean(res ** 2)
40
+ return res_final_mean_2
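
The forward pass above is a Mahalanobis-style penalty: after padding the betas to the prior's dimensionality, it multiplies the mean-subtracted samples by the Cholesky factor of the inverse covariance and takes the mean of the squares. A standalone sketch with a small synthetic Gaussian instead of the SMAL pickle file:

    import numpy as np
    import torch

    cov = np.diag([1.0, 2.0, 0.5, 1.5, 1.0])
    prec_chol = torch.tensor(np.linalg.cholesky(np.linalg.inv(cov))).float()
    mean = torch.zeros(5)

    betas = torch.randn(4, 5)
    mean_sub = betas - mean[None, :]
    res = torch.tensordot(mean_sub, prec_chol, dims=([1], [0]))
    print((res ** 2).mean())    # small for samples close to the prior mean
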
src/smal_pytorch/renderer/differentiable_renderer.py ADDED
@@ -0,0 +1,280 @@
1
+
2
+ # part of the code from
3
+ # https://github.com/benjiebob/SMALify/blob/master/smal_fitter/p3d_renderer.py
4
+
5
+ import torch
6
+ import torch.nn.functional as F
7
+ from scipy.io import loadmat
8
+ import numpy as np
9
+ # import config
10
+
11
+ import pytorch3d
12
+ from pytorch3d.structures import Meshes
13
+ from pytorch3d.renderer import (
14
+ PerspectiveCameras, look_at_view_transform, look_at_rotation,
15
+ RasterizationSettings, MeshRenderer, MeshRasterizer, BlendParams,
16
+ PointLights, HardPhongShader, SoftSilhouetteShader, Materials, Textures,
17
+ DirectionalLights
18
+ )
19
+ from pytorch3d.renderer import TexturesVertex, SoftPhongShader
20
+ from pytorch3d.io import load_objs_as_meshes
21
+
22
+ MESH_COLOR_0 = [0, 172, 223]
23
+ MESH_COLOR_1 = [172, 223, 0]
24
+
25
+
26
+ '''
27
+ Explanation of the shift between projection results from opendr and pytorch3d:
28
+ (0, 0, ?) will be projected to 127.5 (pytorch3d) instead of 128 (opendr)
29
+ imagine you have an image of size 4:
30
+ middle of the first pixel is 0
31
+ middle of the last pixel is 3
32
+ => middle of the image would be 1.5 and not 2!
33
+ so in order to go from pytorch3d predictions to opendr we would calculate: p_odr = p_p3d * (128/127.5)
34
+ To reproject points (p3d) by hand according to this pytorch3d renderer we would do the following steps:
35
+ 1.) build camera matrix
36
+ K = np.array([[flength, 0, c_x],
37
+ [0, flength, c_y],
38
+ [0, 0, 1]], np.float)
39
+ 2.) we don't need to add extrinsics, as the mesh comes with translation (which is
40
+ added within smal_pytorch). all 3d points are already in the camera coordinate system.
41
+ -> projection reduces to p2d_proj = K*p3d
42
+ 3.) convert to pytorch3d conventions (0 in the middle of the first pixel)
43
+ p2d_proj_pytorch3d = p2d_proj / image_size * (image_size-1.)
44
+ renderer.py - project_points_p3d: shows an example of what is described above, but
45
+ same focal length for the whole batch
46
+
47
+ '''
48
+
49
+ class SilhRenderer(torch.nn.Module):
50
+ def __init__(self, image_size, adapt_R_wldo=False):
51
+ super(SilhRenderer, self).__init__()
52
+ # see: https://pytorch3d.org/files/fit_textured_mesh.py, line 315
53
+ # adapt_R=True is True for all my experiments
54
+ # image_size: one number, integer
55
+ # -----
56
+ # set mesh color
57
+ self.register_buffer('mesh_color_0', torch.FloatTensor(MESH_COLOR_0))
58
+ self.register_buffer('mesh_color_1', torch.FloatTensor(MESH_COLOR_1))
59
+ # prepare extrinsics, which in our case don't change
60
+ R = torch.Tensor(np.eye(3)).float()[None, :, :]
61
+ T = torch.Tensor(np.zeros((1, 3))).float()
62
+ if adapt_R_wldo:
63
+ R[0, 0, 0] = -1
64
+ else: # used for all my own experiments
65
+ R[0, 0, 0] = -1
66
+ R[0, 1, 1] = -1
67
+ self.register_buffer('R', R)
68
+ self.register_buffer('T', T)
69
+ # prepare that part of the intrinsics which does not change either
70
+ # principal_point_prep = torch.Tensor([self.image_size / 2., self.image_size / 2.]).float()[None, :].float().to(device)
71
+ # image_size_prep = torch.Tensor([self.image_size, self.image_size]).float()[None, :].float().to(device)
72
+ self.img_size_scalar = image_size
73
+ self.register_buffer('image_size', torch.Tensor([image_size, image_size]).float()[None, :].float())
74
+ self.register_buffer('principal_point', torch.Tensor([image_size / 2., image_size / 2.]).float()[None, :].float())
75
+ # Rasterization settings for differentiable rendering, where the blur_radius
76
+ # initialization is based on Liu et al, 'Soft Rasterizer: A Differentiable
77
+ # Renderer for Image-based 3D Reasoning', ICCV 2019
78
+ self.blend_params = BlendParams(sigma=1e-4, gamma=1e-4)
79
+ self.raster_settings_soft = RasterizationSettings(
80
+ image_size=image_size, # 128
81
+ blur_radius=np.log(1. / 1e-4 - 1.)*self.blend_params.sigma,
82
+ faces_per_pixel=100) #50,
83
+ # Renderer for Image-based 3D Reasoning', body part segmentation
84
+ self.blend_params_parts = BlendParams(sigma=2*1e-4, gamma=1e-4)
85
+ self.raster_settings_soft_parts = RasterizationSettings(
86
+ image_size=image_size, # 128
87
+ blur_radius=np.log(1. / 1e-4 - 1.)*self.blend_params_parts.sigma,
88
+ faces_per_pixel=60) #50,
89
+ # settings for visualization renderer
90
+ self.raster_settings_vis = RasterizationSettings(
91
+ image_size=image_size,
92
+ blur_radius=0.0,
93
+ faces_per_pixel=1)
94
+
95
+ def _get_cam(self, focal_lengths):
96
+ device = focal_lengths.device
97
+ bs = focal_lengths.shape[0]
98
+ if pytorch3d.__version__ == '0.2.5':
99
+ cameras = PerspectiveCameras(device=device,
100
+ focal_length=focal_lengths.repeat((1, 2)),
101
+ principal_point=self.principal_point.repeat((bs, 1)),
102
+ R=self.R.repeat((bs, 1, 1)), T=self.T.repeat((bs, 1)),
103
+ image_size=self.image_size.repeat((bs, 1)))
104
+ elif pytorch3d.__version__ == '0.6.1':
105
+ cameras = PerspectiveCameras(device=device, in_ndc=False,
106
+ focal_length=focal_lengths.repeat((1, 2)),
107
+ principal_point=self.principal_point.repeat((bs, 1)),
108
+ R=self.R.repeat((bs, 1, 1)), T=self.T.repeat((bs, 1)),
109
+ image_size=self.image_size.repeat((bs, 1)))
110
+ else:
111
+ print('this part depends on the version of pytorch3d, code was developed with 0.2.5')
112
+ raise ValueError
113
+ return cameras
114
+
115
+ def _get_visualization_from_mesh(self, mesh, cameras, lights=None):
116
+ # color renderer for visualization
117
+ with torch.no_grad():
118
+ device = mesh.device
119
+ # renderer for visualization
120
+ if lights is None:
121
+ lights = PointLights(device=device, location=[[0.0, 0.0, 3.0]])
122
+ vis_renderer = MeshRenderer(
123
+ rasterizer=MeshRasterizer(
124
+ cameras=cameras,
125
+ raster_settings=self.raster_settings_vis),
126
+ shader=HardPhongShader(
127
+ device=device,
128
+ cameras=cameras,
129
+ lights=lights))
130
+ # render image:
131
+ visualization = vis_renderer(mesh).permute(0, 3, 1, 2)[:, :3, :, :]
132
+ return visualization
133
+
134
+
135
+ def calculate_vertex_visibility(self, vertices, faces, focal_lengths, soft=False):
136
+ tex = torch.ones_like(vertices) * self.mesh_color_0 # (1, V, 3)
137
+ textures = Textures(verts_rgb=tex)
138
+ mesh = Meshes(verts=vertices, faces=faces, textures=textures)
139
+ cameras = self._get_cam(focal_lengths)
140
+ # NEW: use the rasterizer to check vertex visibility
141
+ # see: https://github.com/facebookresearch/pytorch3d/issues/126
142
+ # Get a rasterizer
143
+ if soft:
144
+ rasterizer = MeshRasterizer(cameras=cameras,
145
+ raster_settings=self.raster_settings_soft)
146
+ else:
147
+ rasterizer = MeshRasterizer(cameras=cameras,
148
+ raster_settings=self.raster_settings_vis)
149
+ # Get the output from rasterization
150
+ fragments = rasterizer(mesh)
151
+ # pix_to_face is of shape (N, H, W, 1)
152
+ pix_to_face = fragments.pix_to_face
153
+ # (F, 3) where F is the total number of faces across all the meshes in the batch
154
+ packed_faces = mesh.faces_packed()
155
+ # (V, 3) where V is the total number of verts across all the meshes in the batch
156
+ packed_verts = mesh.verts_packed()
157
+ vertex_visibility_map = torch.zeros(packed_verts.shape[0]) # (V,)
158
+ # Indices of unique visible faces
159
+ visible_faces = pix_to_face.unique() # [0] # (num_visible_faces )
160
+ # Get Indices of unique visible verts using the vertex indices in the faces
161
+ visible_verts_idx = packed_faces[visible_faces] # (num_visible_faces, 3)
162
+ unique_visible_verts_idx = torch.unique(visible_verts_idx) # (num_visible_verts, )
163
+ # Update visibility indicator to 1 for all visible vertices
164
+ vertex_visibility_map[unique_visible_verts_idx] = 1.0
165
+ # since all meshes have the same amount of vertices, we can reshape the result
166
+ bs = vertices.shape[0]
167
+ vertex_visibility_map_resh = vertex_visibility_map.reshape((bs, -1))
168
+ return pix_to_face, vertex_visibility_map_resh
169
+
170
+
171
+ def get_torch_meshes(self, vertices, faces, color=0):
172
+ # create pytorch mesh
173
+ if color == 0:
174
+ mesh_color = self.mesh_color_0
175
+ else:
176
+ mesh_color = self.mesh_color_1
177
+ tex = torch.ones_like(vertices) * mesh_color # (1, V, 3)
178
+ textures = Textures(verts_rgb=tex)
179
+ mesh = Meshes(verts=vertices, faces=faces, textures=textures)
180
+ return mesh
181
+
182
+
183
+ def get_visualization_nograd(self, vertices, faces, focal_lengths, color=0):
184
+ # vertices: torch.Size([bs, 3889, 3])
185
+ # faces: torch.Size([bs, 7774, 3]), int
186
+ # focal_lengths: torch.Size([bs, 1])
187
+ device = vertices.device
188
+ # create cameras
189
+ cameras = self._get_cam(focal_lengths)
190
+ # create pytorch mesh
191
+ if color == 0:
192
+ mesh_color = self.mesh_color_0 # blue
193
+ elif color == 1:
194
+ mesh_color = self.mesh_color_1
195
+ elif color == 2:
196
+ MESH_COLOR_2 = [240, 250, 240] # white
197
+ mesh_color = torch.FloatTensor(MESH_COLOR_2).to(device)
198
+ elif color == 3:
199
+ # MESH_COLOR_3 = [223, 0, 172] # pink
200
+ # MESH_COLOR_3 = [245, 245, 220] # beige
201
+ MESH_COLOR_3 = [166, 173, 164]
202
+ mesh_color = torch.FloatTensor(MESH_COLOR_3).to(device)
203
+ else:
204
+ MESH_COLOR_2 = [240, 250, 240]
205
+ mesh_color = torch.FloatTensor(MESH_COLOR_2).to(device)
206
+ tex = torch.ones_like(vertices) * mesh_color # (1, V, 3)
207
+ textures = Textures(verts_rgb=tex)
208
+ mesh = Meshes(verts=vertices, faces=faces, textures=textures)
209
+ # render mesh (no gradients)
210
+ # lights = PointLights(device=device, location=[[0.0, 0.0, 3.0]])
211
+ # lights = PointLights(device=device, location=[[2.0, 2.0, -2.0]])
212
+ lights = DirectionalLights(device=device, direction=[[0.0, -5.0, -10.0]])
213
+ visualization = self._get_visualization_from_mesh(mesh, cameras, lights=lights)
214
+ return visualization
215
+
216
+ def project_points(self, points, focal_lengths=None, cameras=None):
217
+ # points: torch.Size([bs, n_points, 3])
218
+ # either focal_lengths or cameras is needed:
219
+ # focal_lenghts: torch.Size([bs, 1])
220
+ # cameras: pytorch camera, for example PerspectiveCameras()
221
+ bs = points.shape[0]
222
+ device = points.device
223
+ screen_size = self.image_size.repeat((bs, 1))
224
+ if cameras is None:
225
+ cameras = self._get_cam(focal_lengths)
226
+ if pytorch3d.__version__ == '0.2.5':
227
+ proj_points_orig = cameras.transform_points_screen(points, screen_size)[:, :, [1, 0]] # used in the original virtuel environment (for cvpr BARC submission)
228
+ elif pytorch3d.__version__ == '0.6.1':
229
+ proj_points_orig = cameras.transform_points_screen(points)[:, :, [1, 0]]
230
+ else:
231
+ print('this part depends on the version of pytorch3d, code was developed with 0.2.5')
232
+ raise ValueError
233
+ # flip, otherwise the 1st and 2nd row are exchanged compared to the ground truth
234
+ proj_points = torch.flip(proj_points_orig, [2])
235
+ # --- project points 'manually'
236
+ # j_proj = project_points_p3d(image_size, focal_length, points, device)
237
+ return proj_points
238
+
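The version switch above exists because transform_points_screen changed its signature between pytorch3d releases. Conceptually it performs a pinhole projection; a rough editorial sketch of the underlying math (not part of the commit; exact axis directions and the half-pixel offset mentioned elsewhere in this file vary across pytorch3d versions):

import torch

def project_points_pinhole(points, focal_ndc, image_size):
    # points: (bs, n, 3) in camera space (z > 0); focal_ndc: (bs, 1) in NDC units
    x_ndc = focal_ndc[:, :, None] * points[:, :, 0:1] / points[:, :, 2:3]
    y_ndc = focal_ndc[:, :, None] * points[:, :, 1:2] / points[:, :, 2:3]
    # map NDC [-1, 1] to pixel coordinates [0, image_size] (convention assumed)
    x_pix = (1.0 - x_ndc) * 0.5 * image_size
    y_pix = (1.0 - y_ndc) * 0.5 * image_size
    return torch.cat([x_pix, y_pix], dim=2)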
+    def forward(self, vertices, points, faces, focal_lengths, color=None):
+        # vertices: torch.Size([bs, 3889, 3])
+        # points: torch.Size([bs, n_points, 3]) (or None)
+        # faces: torch.Size([bs, 7774, 3]), int
+        # focal_lengths: torch.Size([bs, 1])
+        # color: if None we don't render a visualization, else it should be either 0 or 1
+        # ---> important: results are around 0.5 pixels off compared to chumpy!
+        #      have a look at renderer.py for an explanation
+        # create cameras
+        cameras = self._get_cam(focal_lengths)
+        # create pytorch3d mesh
+        if color is None or color == 0:
+            mesh_color = self.mesh_color_0
+        else:
+            mesh_color = self.mesh_color_1
+        tex = torch.ones_like(vertices) * mesh_color  # (1, V, 3)
+        textures = Textures(verts_rgb=tex)
+        mesh = Meshes(verts=vertices, faces=faces, textures=textures)
+        # silhouette renderer
+        renderer_silh = MeshRenderer(
+            rasterizer=MeshRasterizer(
+                cameras=cameras,
+                raster_settings=self.raster_settings_soft),
+            shader=SoftSilhouetteShader(blend_params=self.blend_params))
+        # render silhouette
+        silh_images = renderer_silh(mesh)[..., -1].unsqueeze(1)
+        # project points
+        if points is None:
+            proj_points = None
+        else:
+            proj_points = self.project_points(points=points, cameras=cameras)
+        if color is not None:
+            # color renderer for visualization (no gradients)
+            visualization = self._get_visualization_from_mesh(mesh, cameras)
+            return silh_images, proj_points, visualization
+        else:
+            return silh_images, proj_points
+
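A hedged usage sketch of the full forward pass (not part of the commit; `renderer` stands for an instance of the renderer class defined earlier in this file, and shapes follow the comments above):

# silh, keyp2d, vis_img = renderer(verts, points=joints3d, faces=faces,
#                                  focal_lengths=flengths, color=0)
# silh:    (bs, 1, H, W) soft silhouettes, differentiable
# keyp2d:  (bs, n_points, 2) projected points in pixel coordinates
# vis_img: (bs, 3, H, W) Phong-shaded visualization (rendered without gradients)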
src/smal_pytorch/smal_model/batch_lbs.py ADDED
@@ -0,0 +1,295 @@
+'''
+Adjusted version of other PyTorch implementations of the SMAL/SMPL model
+see:
+    1.) https://github.com/silviazuffi/smalst/blob/master/smal_model/smal_torch.py
+    2.) https://github.com/benjiebob/SMALify/blob/master/smal_model/smal_torch.py
+'''
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import torch
+import numpy as np
+
+
+def batch_skew(vec, batch_size=None):
+    """
+    vec is N x 3, batch_size is int.
+
+    returns N x 3 x 3: the skew-symmetric matrix of each vector.
+    """
+    device = vec.device
+    if batch_size is None:
+        batch_size = vec.shape[0]
+    col_inds = torch.LongTensor([1, 2, 3, 5, 6, 7])
+    indices = torch.reshape(torch.reshape(torch.arange(0, batch_size) * 9, [-1, 1]) + col_inds, [-1, 1])
+    updates = torch.reshape(
+        torch.stack(
+            [
+                -vec[:, 2], vec[:, 1], vec[:, 2], -vec[:, 0], -vec[:, 1],
+                vec[:, 0]
+            ],
+            dim=1), [-1])
+    out_shape = [batch_size * 9]
+    res = torch.Tensor(np.zeros(out_shape[0])).to(device=device)
+    res[np.array(indices.flatten())] = updates
+    res = torch.reshape(res, [batch_size, 3, 3])
+
+    return res
+
+
+def batch_rodrigues(theta):
+    """
+    theta is N x 3 (axis-angle). Returns N x 3 x 3 rotation matrices via the
+    Rodrigues formula R = cos(a) I + (1 - cos(a)) r r^T + sin(a) [r]_x.
+    """
+    device = theta.device
+    batch_size = theta.shape[0]
+
+    angle = (torch.norm(theta + 1e-8, p=2, dim=1)).unsqueeze(-1)
+    r = (torch.div(theta, angle)).unsqueeze(-1)
+
+    angle = angle.unsqueeze(-1)
+    cos = torch.cos(angle)
+    sin = torch.sin(angle)
+
+    outer = torch.matmul(r, r.transpose(1, 2))
+
+    eyes = torch.eye(3).unsqueeze(0).repeat([batch_size, 1, 1]).to(device=device)
+    H = batch_skew(r, batch_size=batch_size)
+    R = cos * eyes + (1 - cos) * outer + sin * H
+
+    return R
+
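A quick editorial sanity check for batch_rodrigues (not part of the commit): a rotation of pi/2 around the z axis must map the x axis to the y axis.

import math
import torch
theta = torch.tensor([[0.0, 0.0, math.pi / 2]])    # N x 3 axis-angle
R = batch_rodrigues(theta)                         # N x 3 x 3
print(R[0] @ torch.tensor([1.0, 0.0, 0.0]))        # ~ (0, 1, 0)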
+def batch_lrotmin(theta):
+    """
+    The output of this is used to compute the joint-to-pose blend shape mapping.
+    Equation 9 in the SMPL paper.
+
+    Args:
+      theta: `Tensor`, N x 72 vector holding the axis-angle rep of K joints.
+             This includes the global rotation, so K = 24.
+
+    Returns
+      diff_vec : `Tensor`: N x 207 rotation matrices of the 23 = (K-1) joints, with the identity subtracted.
+    """
+    # Ignore global rotation
+    theta = theta[:, 3:]
+
+    Rs = batch_rodrigues(torch.reshape(theta, [-1, 3]))
+    lrotmin = torch.reshape(Rs - torch.eye(3), [-1, 207])
+
+    return lrotmin
+
+
+def batch_global_rigid_transformation(Rs, Js, parent, rotate_base=False):
+    """
+    Computes absolute joint locations given pose.
+
+    rotate_base: if True, rotates the global rotation by 90 deg around the x axis.
+                 if False, this is the original SMPL coordinate.
+
+    Args:
+      Rs: N x n_joints x 3 x 3 rotation matrices (35 joints for SMAL)
+      Js: N x n_joints x 3, joint locations before posing
+      parent: n_joints, holding the parent id for each index
+
+    Returns
+      new_J : `Tensor`: N x n_joints x 3 location of absolute joints
+      A     : `Tensor`: N x n_joints x 4 x 4 relative joint transformations for LBS.
+    """
+    device = Rs.device
+    N = Rs.shape[0]
+    if rotate_base:
+        print('Flipping the SMPL coordinate frame!!!!')
+        rot_x = torch.Tensor([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
+        rot_x = torch.reshape(rot_x.repeat([N, 1]), [N, 3, 3])  # in tf this was tile
+        root_rotation = torch.matmul(Rs[:, 0, :, :], rot_x)
+    else:
+        root_rotation = Rs[:, 0, :, :]
+
+    # Now Js is N x n_joints x 3 x 1
+    Js = Js.unsqueeze(-1)
+
+    def make_A(R, t):
+        # R is N x 3 x 3, t is N x 3 x 1
+        R_homo = torch.nn.functional.pad(R, (0, 0, 0, 1, 0, 0))
+        t_homo = torch.cat([t, torch.ones([N, 1, 1]).to(device=device)], 1)
+        return torch.cat([R_homo, t_homo], 2)
+
+    A0 = make_A(root_rotation, Js[:, 0])
+    results = [A0]
+    for i in range(1, parent.shape[0]):
+        j_here = Js[:, i] - Js[:, parent[i]]
+        A_here = make_A(Rs[:, i], j_here)
+        res_here = torch.matmul(results[parent[i]], A_here)
+        results.append(res_here)
+
+    # N x n_joints x 4 x 4
+    results = torch.stack(results, dim=1)
+
+    new_J = results[:, :, :3, 3]
+
+    # --- Compute relative A: skinning is based on
+    # how much the bone moved (not the final location of the bone)
+    # but (final_bone - init_bone)
+    # ---
+    Js_w0 = torch.cat([Js, torch.zeros([N, 35, 1, 1]).to(device=device)], 2)
+    init_bone = torch.matmul(results, Js_w0)
+    # Append empty 4 x 3:
+    init_bone = torch.nn.functional.pad(init_bone, (3, 0, 0, 0, 0, 0, 0, 0))
+    A = results - init_bone
+
+    return new_J, A
+
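The loop above implements a standard forward-kinematics chain: the world transform of joint i is the parent's world transform composed with the local offset transform. A miniature editorial sketch of just that chain logic (not part of the commit):

import torch
parents = [0, 0, 1]                                    # a tiny 3-joint chain
offsets = [torch.zeros(3), torch.tensor([1., 0., 0.]), torch.tensor([1., 0., 0.])]
world = [None] * 3
for i in range(3):
    T = torch.eye(4)
    T[:3, 3] = offsets[i]                              # identity rotations here
    world[i] = T if i == 0 else world[parents[i]] @ T
print(world[2][:3, 3])                                 # -> (2., 0., 0.): the rest pose is recovered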
+#########################################################################################
+
+def get_bone_length_scales(part_list, betas_logscale):
+    leg_joints = list(range(7, 11)) + list(range(11, 15)) + list(range(17, 21)) + list(range(21, 25))
+    tail_joints = list(range(25, 32))
+    ear_joints = [33, 34]
+    neck_joints = [15, 6]  # ?
+    core_joints = [4, 5]  # ?
+    mouth_joints = [16, 32]
+    log_scales = torch.zeros(betas_logscale.shape[0], 35).to(betas_logscale.device)
+    for ind, part in enumerate(part_list):
+        if part == 'legs_l':
+            log_scales[:, leg_joints] = betas_logscale[:, ind][:, None]
+        elif part == 'tail_l':
+            log_scales[:, tail_joints] = betas_logscale[:, ind][:, None]
+        elif part == 'ears_l':
+            log_scales[:, ear_joints] = betas_logscale[:, ind][:, None]
+        elif part == 'neck_l':
+            log_scales[:, neck_joints] = betas_logscale[:, ind][:, None]
+        elif part == 'core_l':
+            log_scales[:, core_joints] = betas_logscale[:, ind][:, None]
+        elif part == 'head_l':
+            log_scales[:, mouth_joints] = betas_logscale[:, ind][:, None]
+        else:
+            pass
+    all_scales = torch.exp(log_scales)
+    return all_scales[:, 1:]  # don't count the root joint
+
+
+def get_beta_scale_mask(part_list):
+    # which joints belong to which body part
+    leg_joints = list(range(7, 11)) + list(range(11, 15)) + list(range(17, 21)) + list(range(21, 25))
+    tail_joints = list(range(25, 32))
+    ear_joints = [33, 34]
+    neck_joints = [15, 6]  # ?
+    core_joints = [4, 5]  # ?
+    mouth_joints = [16, 32]
+    n_b_log = len(part_list)
+    beta_scale_mask = torch.zeros(35, 3, n_b_log)
+    for ind, part in enumerate(part_list):
+        if part == 'legs_l':
+            beta_scale_mask[leg_joints, [2], [ind]] = 1.0  # leg lengthening
+        elif part == 'legs_f':
+            beta_scale_mask[leg_joints, [0], [ind]] = 1.0  # leg fatness
+            beta_scale_mask[leg_joints, [1], [ind]] = 1.0  # leg fatness
+        elif part == 'tail_l':
+            beta_scale_mask[tail_joints, [0], [ind]] = 1.0  # tail lengthening
+        elif part == 'tail_f':
+            beta_scale_mask[tail_joints, [1], [ind]] = 1.0  # tail fatness
+            beta_scale_mask[tail_joints, [2], [ind]] = 1.0  # tail fatness
+        elif part == 'ears_y':
+            beta_scale_mask[ear_joints, [1], [ind]] = 1.0  # ear y
+        elif part == 'ears_l':
+            beta_scale_mask[ear_joints, [2], [ind]] = 1.0  # ear z
+        elif part == 'neck_l':
+            beta_scale_mask[neck_joints, [0], [ind]] = 1.0  # neck lengthening
+        elif part == 'neck_f':
+            beta_scale_mask[neck_joints, [1], [ind]] = 1.0  # neck fatness
+            beta_scale_mask[neck_joints, [2], [ind]] = 1.0  # neck fatness
+        elif part == 'core_l':
+            beta_scale_mask[core_joints, [0], [ind]] = 1.0  # core lengthening
+            # beta_scale_mask[core_joints, [1], [ind]] = 1.0  # core fatness (height)
+        elif part == 'core_fs':
+            beta_scale_mask[core_joints, [2], [ind]] = 1.0  # core fatness (side)
+        elif part == 'head_l':
+            beta_scale_mask[mouth_joints, [0], [ind]] = 1.0  # head lengthening
+        elif part == 'head_f':
+            beta_scale_mask[mouth_joints, [1], [ind]] = 1.0  # head fatness 0
+            beta_scale_mask[mouth_joints, [2], [ind]] = 1.0  # head fatness 1
+        else:
+            print(part + ' not available')
+            raise ValueError
+    beta_scale_mask = torch.transpose(
+        beta_scale_mask.reshape(35 * 3, n_b_log), 0, 1)
+    return beta_scale_mask
+
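An editorial sketch of how this mask is consumed downstream (see smal_torch_new.py): per-part log scales expand to per-joint, per-axis scale factors, and a zero log-scale vector gives identity scaling.

import torch
part_list = ['legs_l', 'legs_f', 'tail_l', 'tail_f', 'ears_y', 'ears_l', 'head_l']
mask = get_beta_scale_mask(part_list)                    # (n_parts, 35 * 3)
betas_logscale = torch.zeros(1, len(part_list))
scales = torch.exp(betas_logscale @ mask).reshape(-1, 35, 3)
assert torch.allclose(scales, torch.ones(1, 35, 3))      # zero log scale -> scale 1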
+def batch_global_rigid_transformation_biggs(Rs, Js, parent, scale_factors_3x3, rotate_base=False, betas_logscale=None, opts=None):
+    """
+    Computes absolute joint locations given pose, with per-part bone scaling
+    as in Biggs et al. (WLDO).
+
+    rotate_base: if True, rotates the global rotation by 90 deg around the x axis.
+                 if False, this is the original SMPL coordinate.
+
+    Args:
+      Rs: N x n_joints x 3 x 3 rotation matrices (35 joints for SMAL)
+      Js: N x n_joints x 3, joint locations before posing
+      parent: n_joints, holding the parent id for each index
+
+    Returns
+      new_J : `Tensor`: N x n_joints x 3 location of absolute joints
+      A     : `Tensor`: N x n_joints x 4 x 4 relative joint transformations for LBS.
+    """
+    N = Rs.shape[0]
+    if rotate_base:
+        print('Flipping the SMPL coordinate frame!!!!')
+        rot_x = torch.Tensor([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
+        rot_x = torch.reshape(rot_x.repeat([N, 1]), [N, 3, 3])  # in tf this was tile
+        root_rotation = torch.matmul(Rs[:, 0, :, :], rot_x)
+    else:
+        root_rotation = Rs[:, 0, :, :]
+
+    # Now Js is N x n_joints x 3 x 1
+    Js = Js.unsqueeze(-1)
+
+    Js_orig = Js.clone()
+
+    def make_A(R, t):
+        # R is N x 3 x 3, t is N x 3 x 1
+        R_homo = torch.nn.functional.pad(R, (0, 0, 0, 1, 0, 0))
+        t_homo = torch.cat([t, torch.ones([N, 1, 1]).to(Rs.device)], 1)
+        return torch.cat([R_homo, t_homo], 2)
+
+    A0 = make_A(root_rotation, Js[:, 0])
+    results = [A0]
+    for i in range(1, parent.shape[0]):
+        j_here = Js[:, i] - Js[:, parent[i]]
+        try:
+            s_par_inv = torch.inverse(scale_factors_3x3[:, parent[i]])
+        except RuntimeError:
+            # if the scale matrix is (numerically) singular, clamp the diagonal
+            # away from zero before inverting
+            s_par_inv = torch.inverse(torch.max(scale_factors_3x3[:, parent[i]],
+                                                0.01 * torch.eye(3)[None, :, :].to(scale_factors_3x3.device)))
+        rot = Rs[:, i]
+        s = scale_factors_3x3[:, i]
+
+        rot_new = s_par_inv @ rot @ s
+
+        A_here = make_A(rot_new, j_here)
+        res_here = torch.matmul(results[parent[i]], A_here)
+
+        results.append(res_here)
+
+    # N x n_joints x 4 x 4
+    results = torch.stack(results, dim=1)
+
+    # scale updates
+    new_J = results[:, :, :3, 3]
+
+    # --- Compute relative A: skinning is based on
+    # how much the bone moved (not the final location of the bone)
+    # but (final_bone - init_bone)
+    # ---
+    Js_w0 = torch.cat([Js_orig, torch.zeros([N, 35, 1, 1]).to(Rs.device)], 2)
+    init_bone = torch.matmul(results, Js_w0)
+    # Append empty 4 x 3:
+    init_bone = torch.nn.functional.pad(init_bone, (3, 0, 0, 0, 0, 0, 0, 0))
+    A = results - init_bone
+
+    return new_J, A
src/smal_pytorch/smal_model/smal_basics.py ADDED
@@ -0,0 +1,82 @@
+'''
+Adjusted version of other PyTorch implementations of the SMAL/SMPL model
+see:
+    1.) https://github.com/silviazuffi/smalst/blob/master/smal_model/smal_torch.py
+    2.) https://github.com/benjiebob/SMALify/blob/master/smal_model/smal_torch.py
+'''
+
+import os
+import sys
+import json
+import pickle as pkl
+import numpy as np
+
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
+from configs.SMAL_configs import SMAL_DATA_DIR, SYMMETRY_INDS_FILE
+
+model_dir = SMAL_DATA_DIR
+symmetry_inds_file = SYMMETRY_INDS_FILE
+with open(symmetry_inds_file) as f:
+    symmetry_inds_dict = json.load(f)
+LEFT_INDS = np.asarray(symmetry_inds_dict['left_inds'])
+RIGHT_INDS = np.asarray(symmetry_inds_dict['right_inds'])
+CENTER_INDS = np.asarray(symmetry_inds_dict['center_inds'])
+
+
+def get_symmetry_indices():
+    sym_dict = {'left': LEFT_INDS,
+                'right': RIGHT_INDS,
+                'center': CENTER_INDS}
+    return sym_dict
+
+
+def verify_symmetry(shapedirs, center_inds=CENTER_INDS, left_inds=LEFT_INDS, right_inds=RIGHT_INDS):
+    # shapedirs: (3889, 3, n_sh)
+    assert (shapedirs[center_inds, 1, :] == 0.0).all()
+    assert (shapedirs[right_inds, 1, :] == -shapedirs[left_inds, 1, :]).all()
+    return
+
+
+def from_shapedirs_to_shapedirs_half(shapedirs, center_inds=CENTER_INDS, left_inds=LEFT_INDS, right_inds=RIGHT_INDS, verify=False):
+    # shapedirs: (3889, 3, n_sh)
+    # shapedirs_half: (2012, 3, n_sh)
+    selected_inds = np.concatenate((center_inds, left_inds), axis=0)
+    shapedirs_half = shapedirs[selected_inds, :, :]
+    if verify:
+        verify_symmetry(shapedirs)
+    else:
+        shapedirs_half[:center_inds.shape[0], 1, :] = 0.0
+    return shapedirs_half
+
+
+def from_shapedirs_half_to_shapedirs(shapedirs_half, center_inds=CENTER_INDS, left_inds=LEFT_INDS, right_inds=RIGHT_INDS):
+    # shapedirs_half: (2012, 3, n_sh)
+    # shapedirs: (3889, 3, n_sh)
+    shapedirs = np.zeros((center_inds.shape[0] + 2 * left_inds.shape[0], 3, shapedirs_half.shape[2]))
+    shapedirs[center_inds, :, :] = shapedirs_half[:center_inds.shape[0], :, :]
+    shapedirs[left_inds, :, :] = shapedirs_half[center_inds.shape[0]:, :, :]
+    shapedirs[right_inds, :, :] = shapedirs_half[center_inds.shape[0]:, :, :]
+    shapedirs[right_inds, 1, :] = -shapedirs_half[center_inds.shape[0]:, 1, :]
+    return shapedirs
+
+
+def align_smal_template_to_symmetry_axis(v, subtract_mean=True):
+    # These are the indices of the points that are on the symmetry axis
+    I = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 37, 55, 119, 120, 163, 209, 210, 211, 213, 216, 227, 326, 395, 452, 578, 910, 959, 964, 975, 976, 977, 1172, 1175, 1176, 1178, 1194, 1243, 1739, 1796, 1797, 1798, 1799, 1800, 1801, 1802, 1803, 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1815, 1816, 1817, 1818, 1819, 1820, 1821, 1822, 1823, 1824, 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, 1842, 1843, 1844, 1845, 1846, 1847, 1848, 1849, 1850, 1851, 1852, 1853, 1854, 1855, 1856, 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1870, 1919, 1960, 1961, 1965, 1967, 2003]
+    if subtract_mean:
+        v = v - np.mean(v)
+    y = np.mean(v[I, 1])
+    v[:, 1] = v[:, 1] - y
+    v[I, 1] = 0
+    left_inds = LEFT_INDS
+    right_inds = RIGHT_INDS
+    center_inds = CENTER_INDS
+    v[right_inds, :] = np.array([1, -1, 1]) * v[left_inds, :]
+    assert len(left_inds) == len(right_inds)
+    return v, left_inds, right_inds, center_inds
+
src/smal_pytorch/smal_model/smal_torch_new.py ADDED
@@ -0,0 +1,313 @@
+"""
+PyTorch implementation of the SMAL/SMPL model
+see:
+    1.) https://github.com/silviazuffi/smalst/blob/master/smal_model/smal_torch.py
+    2.) https://github.com/benjiebob/SMALify/blob/master/smal_model/smal_torch.py
+main changes compared to SMALST and WLDO:
+  * new model
+    (/ps/scratch/nrueegg/new_projects/side_packages/SMALify/new_smal_pca/results/my_tposeref_results_3/)
+    dogs are part of the pca used to create the model
+    all meshes are centered around their root joint
+    the animals are all scaled such that their body length (butt to breast) is 1:
+        X_init = np.concatenate((vertices_dogs, vertices_smal), axis=0)  # vertices_dogs
+        X = []
+        for ind in range(0, X_init.shape[0]):
+            X_tmp, _, _, _ = align_smal_template_to_symmetry_axis(X_init[ind, :, :], subtract_mean=True)  # not sure if this is necessary
+            X.append(X_tmp)
+        X = np.asarray(X)
+        # define points which will be used for normalization
+        idxs_front = [6, 16, 8, 964]  # [1172, 6, 16, 8, 964]
+        idxs_back = [174, 2148, 175, 2149]  # not in the middle, but pairs
+        reg_j = np.asarray(dd['J_regressor'].todense())
+        # normalize the meshes such that X_frontback_dist is 1 and the root joint is in the center (0, 0, 0)
+        X_front = X[:, idxs_front, :].mean(axis=1)
+        X_back = X[:, idxs_back, :].mean(axis=1)
+        X_frontback_dist = np.sqrt(((X_front - X_back)**2).sum(axis=1))
+        X = X / X_frontback_dist[:, None, None]
+        X_j0 = np.sum(X[:, reg_j[0, :]>0, :] * reg_j[0, (reg_j[0, :]>0)][None, :, None], axis=1)
+        X = X - X_j0[:, None, :]
+  * limb length changes are added the same way as in WLDO
+  * an overall scale factor is added
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import sys
+import pickle as pkl
+import numpy as np
+import torch
+import chumpy as ch  # some model pickles contain chumpy arrays
+from torch import nn
+
+from .batch_lbs import batch_rodrigues, batch_global_rigid_transformation, batch_global_rigid_transformation_biggs, get_bone_length_scales, get_beta_scale_mask
+from .smal_basics import align_smal_template_to_symmetry_axis, get_symmetry_indices
+
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
+from configs.SMAL_configs import KEY_VIDS, CANONICAL_MODEL_JOINTS, IDXS_BONES_NO_REDUNDANCY, SMAL_MODEL_PATH
+
+from smal_pytorch.utils import load_vertex_colors
+
+
+# There are chumpy variables, so convert them to numpy.
+def undo_chumpy(x):
+    return x if isinstance(x, np.ndarray) else x.r
+
+
+class SMAL(nn.Module):
+    def __init__(self, pkl_path=SMAL_MODEL_PATH, n_betas=None, template_name='neutral', use_smal_betas=True, logscale_part_list=None):
+        super(SMAL, self).__init__()
+
+        if logscale_part_list is None:
+            logscale_part_list = ['legs_l', 'legs_f', 'tail_l', 'tail_f', 'ears_y', 'ears_l', 'head_l']
+        self.logscale_part_list = logscale_part_list
+        self.betas_scale_mask = get_beta_scale_mask(part_list=self.logscale_part_list)
+        self.num_betas_logscale = len(self.logscale_part_list)
+
+        self.use_smal_betas = use_smal_betas
+
+        # -- Load SMPL params --
+        try:
+            with open(pkl_path, 'r') as f:
+                dd = pkl.load(f)
+        except (UnicodeDecodeError, TypeError):
+            with open(pkl_path, 'rb') as file:
+                u = pkl._Unpickler(file)
+                u.encoding = 'latin1'
+                dd = u.load()
+
+        self.f = dd['f']
+        self.register_buffer('faces', torch.from_numpy(self.f.astype(int)))
+
+        # get the correct template (mean shape)
+        if template_name == 'neutral':
+            v_template = dd['v_template']
+            v = v_template
+        else:
+            raise NotImplementedError
+
+        # Mean template vertices
+        self.register_buffer('v_template', torch.Tensor(v))
+        # Size of mesh [Number of vertices, 3]
+        self.size = [self.v_template.shape[0], 3]
+        self.num_betas = dd['shapedirs'].shape[-1]
+        # symmetry indices
+        self.sym_ids_dict = get_symmetry_indices()
+
+        # Shape blend shape basis
+        shapedir = np.reshape(undo_chumpy(dd['shapedirs']), [-1, self.num_betas]).T
+        shapedir.flags['WRITEABLE'] = True  # not sure why this is necessary
+        self.register_buffer('shapedirs', torch.Tensor(shapedir))
+
+        # Regressor for joint locations given shape
+        self.register_buffer('J_regressor', torch.Tensor(dd['J_regressor'].T.todense()))
+
+        # Pose blend shape basis
+        num_pose_basis = dd['posedirs'].shape[-1]
+        posedirs = np.reshape(undo_chumpy(dd['posedirs']), [-1, num_pose_basis]).T
+        self.register_buffer('posedirs', torch.Tensor(posedirs))
+
+        # indices of parents for each joint
+        self.parents = dd['kintree_table'][0].astype(np.int32)
+
+        # LBS weights
+        self.register_buffer('weights', torch.Tensor(undo_chumpy(dd['weights'])))
+
+    def _caclulate_bone_lengths_from_J(self, J, betas_logscale):
+        # NEW: calculate bone lengths
+        all_bone_lengths_list = []
+        for i in range(1, self.parents.shape[0]):
+            bone_vec = J[:, i] - J[:, self.parents[i]]
+            bone_length = torch.sqrt(torch.sum(bone_vec ** 2, axis=1))
+            all_bone_lengths_list.append(bone_length)
+        all_bone_lengths = torch.stack(all_bone_lengths_list)
+        # some bones come in symmetric pairs, it is enough to take one of the two
+        all_bone_length_scales = get_bone_length_scales(self.logscale_part_list, betas_logscale)
+        all_bone_lengths = all_bone_lengths.permute((1, 0)) * all_bone_length_scales
+        return all_bone_lengths
+
+    def caclulate_bone_lengths(self, beta, betas_logscale, shapedirs_sel=None, short=True):
+        nBetas = beta.shape[1]
+
+        # 1. Add shape blend shapes
+        # do we use the original shapedirs or a new set of selected shapedirs?
+        if shapedirs_sel is None:
+            shapedirs_sel = self.shapedirs[:nBetas, :]
+        else:
+            assert shapedirs_sel.shape[0] == nBetas
+        v_shaped = self.v_template + torch.reshape(torch.matmul(beta, shapedirs_sel), [-1, self.size[0], self.size[1]])
+
+        # 2. Infer shape-dependent joint locations.
+        Jx = torch.matmul(v_shaped[:, :, 0], self.J_regressor)
+        Jy = torch.matmul(v_shaped[:, :, 1], self.J_regressor)
+        Jz = torch.matmul(v_shaped[:, :, 2], self.J_regressor)
+        J = torch.stack([Jx, Jy, Jz], dim=2)
+
+        # calculate bone lengths
+        all_bone_lengths = self._caclulate_bone_lengths_from_J(J, betas_logscale)
+        selected_bone_lengths = all_bone_lengths[:, IDXS_BONES_NO_REDUNDANCY]
+
+        if short:
+            return selected_bone_lengths
+        else:
+            return all_bone_lengths
+
+    def __call__(self, beta, betas_limbs, theta=None, pose=None, trans=None, del_v=None, get_skin=True, keyp_conf='red', get_all_info=False, shapedirs_sel=None):
+        device = beta.device
+
+        betas_logscale = betas_limbs
+        # NEW: the rotation can be given as rotation matrices instead of axis-angle vectors
+        #   theta: BS x n_joints x 3 or BS x (n_joints * 3) (axis-angle)
+        #   pose:  BS x n_joints x 3 x 3 (rotation matrices)
+        if (theta is None) and (pose is None):
+            raise ValueError("Either pose (rotation matrices) or theta (axis-angle) must be given")
+        elif (theta is not None) and (pose is not None):
+            raise ValueError("Only one of pose (rotation matrices) and theta (axis-angle) may be given")
+
+        nBetas = beta.shape[1]  # use_smal_betas is currently always treated as True
+
+        # 1. Add shape blend shapes
+        # do we use the original shapedirs or a new set of selected shapedirs?
+        if shapedirs_sel is None:
+            shapedirs_sel = self.shapedirs[:nBetas, :]
+        else:
+            assert shapedirs_sel.shape[0] == nBetas
+
+        if nBetas > 0:
+            if del_v is None:
+                v_shaped = self.v_template + torch.reshape(torch.matmul(beta, shapedirs_sel), [-1, self.size[0], self.size[1]])
+            else:
+                v_shaped = self.v_template + del_v + torch.reshape(torch.matmul(beta, shapedirs_sel), [-1, self.size[0], self.size[1]])
+        else:
+            if del_v is None:
+                v_shaped = self.v_template.unsqueeze(0)
+            else:
+                v_shaped = self.v_template + del_v
+
+        # 2. Infer shape-dependent joint locations.
+        Jx = torch.matmul(v_shaped[:, :, 0], self.J_regressor)
+        Jy = torch.matmul(v_shaped[:, :, 1], self.J_regressor)
+        Jz = torch.matmul(v_shaped[:, :, 2], self.J_regressor)
+        J = torch.stack([Jx, Jy, Jz], dim=2)
+
+        # 3. Add pose blend shapes
+        # N x 35 x 3 x 3
+        if pose is None:
+            Rs = torch.reshape(batch_rodrigues(torch.reshape(theta, [-1, 3])), [-1, 35, 3, 3])
+        else:
+            Rs = pose
+        # Ignore global rotation.
+        pose_feature = torch.reshape(Rs[:, 1:, :, :] - torch.eye(3).to(device=device), [-1, 306])
+
+        v_posed = torch.reshape(
+            torch.matmul(pose_feature, self.posedirs),
+            [-1, self.size[0], self.size[1]]) + v_shaped
+
+        # -------------------------
+        # new: add corrections of bone lengths to the template (before hypothetical pose blend shapes!)
+        # see batch_lbs.py (adapted from Biggs et al.)
+        betas_scale = torch.exp(betas_logscale @ self.betas_scale_mask.to(betas_logscale.device))
+        scaling_factors = betas_scale.reshape(-1, 35, 3)
+        scale_factors_3x3 = torch.diag_embed(scaling_factors, dim1=-2, dim2=-1)
+
+        # 4. Get the global joint location
+        # self.J_transformed, A = batch_global_rigid_transformation(Rs, J, self.parents)
+        self.J_transformed, A = batch_global_rigid_transformation_biggs(Rs, J, self.parents, scale_factors_3x3, betas_logscale=betas_logscale)
+
+        # 2-BONES. Calculate bone lengths
+        all_bone_lengths = self._caclulate_bone_lengths_from_J(J, betas_logscale)
+        # -------------------------
+
+        # 5. Do skinning:
+        num_batch = Rs.shape[0]
+
+        weights_t = self.weights.repeat([num_batch, 1])
+        W = torch.reshape(weights_t, [num_batch, -1, 35])
+
+        T = torch.reshape(
+            torch.matmul(W, torch.reshape(A, [num_batch, 35, 16])),
+            [num_batch, -1, 4, 4])
+        v_posed_homo = torch.cat(
+            [v_posed, torch.ones([num_batch, v_posed.shape[1], 1]).to(device=device)], 2)
+        v_homo = torch.matmul(T, v_posed_homo.unsqueeze(-1))
+
+        verts = v_homo[:, :, :3, 0]
+
+        if trans is None:
+            trans = torch.zeros((num_batch, 3)).to(device=device)
+
+        verts = verts + trans[:, None, :]
+
+        # Get joints:
+        joint_x = torch.matmul(verts[:, :, 0], self.J_regressor)
+        joint_y = torch.matmul(verts[:, :, 1], self.J_regressor)
+        joint_z = torch.matmul(verts[:, :, 2], self.J_regressor)
+        joints = torch.stack([joint_x, joint_y, joint_z], dim=2)
+
+        # New... (see https://github.com/benjiebob/SMALify/blob/master/smal_model/smal_torch.py)
+        joints = torch.cat([
+            joints,
+            verts[:, None, 1863],  # end of nose
+            verts[:, None, 26],    # chin
+            verts[:, None, 2124],  # right ear tip
+            verts[:, None, 150],   # left ear tip
+            verts[:, None, 3055],  # left eye
+            verts[:, None, 1097],  # right eye
+        ], dim=1)
+
+        if keyp_conf == 'blue' or keyp_conf == 'dict':
+            # Generate keypoints
+            nLandmarks = KEY_VIDS.shape[0]  # 24
+            j3d = torch.zeros((num_batch, nLandmarks, 3)).to(device=device)
+            for j in range(nLandmarks):
+                j3d[:, j, :] = torch.mean(verts[:, KEY_VIDS[j], :], dim=1)  # translation is already added to the vertices
+            joints_blue = j3d
+
+        joints_red = joints[:, :-6, :]
+        joints_green = joints[:, CANONICAL_MODEL_JOINTS, :]
+
+        if keyp_conf == 'red':
+            relevant_joints = joints_red
+        elif keyp_conf == 'green':
+            relevant_joints = joints_green
+        elif keyp_conf == 'blue':
+            relevant_joints = joints_blue
+        elif keyp_conf == 'dict':
+            relevant_joints = {'red': joints_red,
+                               'green': joints_green,
+                               'blue': joints_blue}
+        else:
+            raise NotImplementedError
+
+        if get_all_info:
+            return verts, relevant_joints, Rs, all_bone_lengths
+        else:
+            if get_skin:
+                return verts, relevant_joints, Rs
+            else:
+                return relevant_joints
+
src/smal_pytorch/utils.py ADDED
@@ -0,0 +1,13 @@
+import numpy as np
+
+
+def load_vertex_colors(obj_path):
+    # read per-vertex colors from an OBJ file in which 'v' lines carry
+    # 'x y z r g b' (position followed by color)
+    v_colors = []
+    for line in open(obj_path, "r"):
+        if line.startswith('#'):
+            continue
+        values = line.split()
+        if not values:
+            continue
+        if values[0] == 'v':
+            v_colors.append(values[4:7])
+        else:
+            continue
+    return np.asarray(v_colors, dtype=np.float32)
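A small editorial example of the OBJ flavor this parser expects (not part of the commit): 'v' lines carrying x y z r g b.

import os
import tempfile
obj_text = "v 0.1 0.2 0.3 0.9 0.5 0.1\nv 0.0 0.0 0.0 0.2 0.4 0.6\n"
path = os.path.join(tempfile.gettempdir(), "two_colored_verts.obj")
with open(path, "w") as f:
    f.write(obj_text)
print(load_vertex_colors(path))   # -> [[0.9 0.5 0.1] [0.2 0.4 0.6]]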
src/stacked_hourglass/__init__.py ADDED
@@ -0,0 +1,2 @@
+from stacked_hourglass.model import hg1, hg2, hg4, hg8
+from stacked_hourglass.predictor import HumanPosePredictor
src/stacked_hourglass/datasets/__init__.py ADDED
File without changes
src/stacked_hourglass/datasets/imgcrops.py ADDED
@@ -0,0 +1,77 @@
+import os
+import glob
+import numpy as np
+import torch
+import torch.utils.data as data
+
+import sys
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
+from configs.anipose_data_info import COMPLETE_DATA_INFO
+from stacked_hourglass.utils.imutils import load_image, im_to_torch
+from stacked_hourglass.utils.transforms import crop, color_normalize
+from stacked_hourglass.utils.pilutil import imresize
+from configs.dataset_path_configs import TEST_IMAGE_CROP_ROOT_DIR
+from configs.data_info import COMPLETE_DATA_INFO_24
+
+
+class ImgCrops(data.Dataset):
+    DATA_INFO = COMPLETE_DATA_INFO_24
+    ACC_JOINTS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16]
+
+    def __init__(self, img_crop_folder='default', image_path=None, is_train=False, inp_res=256, out_res=64, sigma=1,
+                 scale_factor=0.25, rot_factor=30, label_type='Gaussian',
+                 do_augment='default', shorten_dataset_to=None, dataset_mode='keyp_only'):
+        assert is_train == False
+        assert do_augment == 'default' or do_augment == False
+        self.inp_res = inp_res
+        if img_crop_folder == 'default':
+            self.folder_imgs = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'datasets', 'test_image_crops')
+        else:
+            self.folder_imgs = img_crop_folder
+        name_list = glob.glob(os.path.join(self.folder_imgs, '*.png')) + glob.glob(os.path.join(self.folder_imgs, '*.jpg')) + glob.glob(os.path.join(self.folder_imgs, '*.jpeg'))
+        name_list = sorted(name_list)
+        self.test_name_list = [name.split('/')[-1] for name in name_list]
+        print('len(dataset): ' + str(self.__len__()))
+
+    def __getitem__(self, index):
+        img_name = self.test_name_list[index]
+        # load image
+        img_path = os.path.join(self.folder_imgs, img_name)
+        img = load_image(img_path)  # CxHxW
+        # prepare image (pad to a square, resize, and normalize the colors)
+        img_max = max(img.shape[1], img.shape[2])
+        img_padded = torch.zeros((img.shape[0], img_max, img_max))
+        if img_max == img.shape[2]:
+            start = (img_max - img.shape[1]) // 2
+            img_padded[:, start:start + img.shape[1], :] = img
+        else:
+            start = (img_max - img.shape[2]) // 2
+            img_padded[:, :, start:start + img.shape[2]] = img
+        img = img_padded
+        img_prep = im_to_torch(imresize(img, [self.inp_res, self.inp_res], interp='bilinear'))
+        inp = color_normalize(img_prep, self.DATA_INFO.rgb_mean, self.DATA_INFO.rgb_stddev)
+        # add the following fields to make it compatible with stanext, most of them are fake
+        target_dict = {'index': index, 'center': -2, 'scale': -2,
+                       'breed_index': -2, 'sim_breed_index': -2,
+                       'ind_dataset': 1}
+        target_dict['pts'] = np.zeros((self.DATA_INFO.n_keyp, 3))
+        target_dict['tpts'] = np.zeros((self.DATA_INFO.n_keyp, 3))
+        target_dict['target_weight'] = np.zeros((self.DATA_INFO.n_keyp, 1))
+        target_dict['silh'] = np.zeros((self.inp_res, self.inp_res))
+        return inp, target_dict
+
+    def __len__(self):
+        return len(self.test_name_list)
+
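The pad-to-square step above keeps the aspect ratio by centering the image on a black square canvas before resizing. A tiny editorial sketch of that logic (not part of the commit):

import torch
img = torch.ones(3, 2, 4)                       # a wide 2 x 4 toy image
m = max(img.shape[1], img.shape[2])             # -> 4
canvas = torch.zeros(3, m, m)
start = (m - img.shape[1]) // 2                 # -> 1: vertical centering
canvas[:, start:start + img.shape[1], :] = img
print(canvas[0])                                # rows 0 and 3 stay zero (padding)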
src/stacked_hourglass/datasets/imgcropslist.py ADDED
@@ -0,0 +1,95 @@
+import os
+import glob
+import numpy as np
+import math
+import torch
+import torch.utils.data as data
+
+import sys
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
+from configs.anipose_data_info import COMPLETE_DATA_INFO
+from stacked_hourglass.utils.imutils import load_image, im_to_torch
+from stacked_hourglass.utils.transforms import crop, color_normalize
+from stacked_hourglass.utils.pilutil import imresize
+from configs.data_info import COMPLETE_DATA_INFO_24
+
+
+class ImgCrops(data.Dataset):
+    DATA_INFO = COMPLETE_DATA_INFO_24
+    ACC_JOINTS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16]
+
+    def __init__(self, image_list, bbox_list=None, inp_res=256, dataset_mode='keyp_only'):
+        # the list contains the images directly, not only their paths
+        self.image_list = image_list
+        self.bbox_list = bbox_list
+        self.inp_res = inp_res
+        self.test_name_list = []
+        for ind in np.arange(0, len(self.image_list)):
+            self.test_name_list.append(str(ind))
+        print('len(dataset): ' + str(self.__len__()))
+
+    def __getitem__(self, index):
+        # load image (the list holds image arrays, not paths)
+        img = im_to_torch(self.image_list[index])
+
+        # crop to the bounding box if one is given, otherwise pad to a square
+        if self.bbox_list is not None:
+            bbox = self.bbox_list[index]
+            bbox_xywh = [bbox[0][0], bbox[0][1], bbox[1][0] - bbox[0][0], bbox[1][1] - bbox[0][1]]
+            bbox_c = [bbox_xywh[0] + 0.5 * bbox_xywh[2], bbox_xywh[1] + 0.5 * bbox_xywh[3]]
+            bbox_max = max(bbox_xywh[2], bbox_xywh[3])
+            bbox_diag = math.sqrt(bbox_xywh[2]**2 + bbox_xywh[3]**2)
+            bbox_s = bbox_max / 200. * 256. / 200.  # the maximum side of the bbox will cover 200 pixels of the 256 px crop
+            c = torch.Tensor(bbox_c)
+            s = bbox_s
+            img_prep = crop(img, c, s, [self.inp_res, self.inp_res], rot=0)
+        else:
+            # prepare image (pad to a square, then resize)
+            img_max = max(img.shape[1], img.shape[2])
+            img_padded = torch.zeros((img.shape[0], img_max, img_max))
+            if img_max == img.shape[2]:
+                start = (img_max - img.shape[1]) // 2
+                img_padded[:, start:start + img.shape[1], :] = img
+            else:
+                start = (img_max - img.shape[2]) // 2
+                img_padded[:, :, start:start + img.shape[2]] = img
+            img = img_padded
+            img_prep = im_to_torch(imresize(img, [self.inp_res, self.inp_res], interp='bilinear'))
+
+        inp = color_normalize(img_prep, self.DATA_INFO.rgb_mean, self.DATA_INFO.rgb_stddev)
+        # add the following fields to make it compatible with stanext, most of them are fake
+        target_dict = {'index': index, 'center': -2, 'scale': -2,
+                       'breed_index': -2, 'sim_breed_index': -2,
+                       'ind_dataset': 1}
+        target_dict['pts'] = np.zeros((self.DATA_INFO.n_keyp, 3))
+        target_dict['tpts'] = np.zeros((self.DATA_INFO.n_keyp, 3))
+        target_dict['target_weight'] = np.zeros((self.DATA_INFO.n_keyp, 1))
+        target_dict['silh'] = np.zeros((self.inp_res, self.inp_res))
+        return inp, target_dict
+
+    def __len__(self):
+        return len(self.image_list)
+
src/stacked_hourglass/datasets/samplers/custom_pair_samplers.py ADDED
@@ -0,0 +1,171 @@
+import numpy as np
+import random
+import copy
+
+from torch.utils.data import Sampler
+try:
+    from torch._six import int_classes as _int_classes
+except ImportError:
+    # torch._six was removed in newer PyTorch versions
+    _int_classes = int
+
+
+class CustomPairBatchSampler(Sampler):
+    """Wraps another sampler to yield a mini-batch of indices.
+    The structure of this sampler is more complicated than necessary because it is a
+    shortened/simplified version of CustomBatchSampler. The relations between breeds are
+    not relevant for the cvpr 2022 paper, but we kept this structure, which we were using
+    for the experiments with clade related losses. ToDo: restructure this sampler.
+    Args:
+        data_sampler_info (dict): a dictionary containing information about the dataset and breeds.
+        batch_size (int): Size of mini-batch.
+    """
+
+    def __init__(self, data_sampler_info, batch_size):
+        if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or batch_size <= 0:
+            raise ValueError("batch_size should be a positive integer value, "
+                             "but got batch_size={}".format(batch_size))
+        assert batch_size % 2 == 0
+        self.data_sampler_info = data_sampler_info
+        self.batch_size = batch_size
+        self.n_desired_batches = int(np.floor(len(self.data_sampler_info['name_list']) / batch_size))  # 157
+
+    def get_description(self):
+        description = "\
+            This sampler works only for even batch sizes. \n\
+            It returns pairs of dogs of the same breed"
+        return description
+
+    def __iter__(self):
+        breeds_summary = self.data_sampler_info['breeds_summary']
+
+        breed_image_dict_orig = {}
+        for img_name in self.data_sampler_info['name_list']:  # ['n02093859-Kerry_blue_terrier/n02093859_913.jpg', ...]
+            folder_name = img_name.split('/')[0]
+            breed_name = folder_name.split(folder_name.split('-')[0] + '-')[1]
+            if not (breed_name in breed_image_dict_orig):
+                breed_image_dict_orig[breed_name] = [img_name]
+            else:
+                breed_image_dict_orig[breed_name].append(img_name)
+
+        lengths = np.zeros((len(breed_image_dict_orig.values())))
+        for ind, value in enumerate(breed_image_dict_orig.values()):
+            lengths[ind] = len(value)
+
+        # the raw similarity matrix has roughly 1061 nonzero connections
+        sim_matrix_raw = self.data_sampler_info['breeds_sim_martix_raw']
+
+        # from ind_in_sim_mat to breed_name
+        inverse_sim_dict = {}
+        for abbrev, ind in self.data_sampler_info['breeds_sim_abbrev_inds'].items():
+            # breed_name might be None
+            breed = breeds_summary[abbrev]
+            breed_name = breed._name_stanext
+            inverse_sim_dict[ind] = {'abbrev': abbrev,
+                                     'breed_name': breed_name}
+
+        # similarity for relevant breeds only:
+        related_breeds_top_orig = {}
+        temp = np.arange(sim_matrix_raw.shape[0])
+        for breed_name, breed_images in breed_image_dict_orig.items():
+            abbrev = self.data_sampler_info['breeds_abbrev_dict'][breed_name]
+            related_breeds = {}
+            if abbrev in self.data_sampler_info['breeds_sim_abbrev_inds'].keys():
+                ind_in_sim_mat = self.data_sampler_info['breeds_sim_abbrev_inds'][abbrev]
+                row = sim_matrix_raw[ind_in_sim_mat, :]
+                rel_inds = temp[row > 0]
+                for ind in rel_inds:
+                    rel_breed_name = inverse_sim_dict[ind]['breed_name']
+                    rel_abbrev = inverse_sim_dict[ind]['abbrev']
+                    # does this breed exist in this dataset?
+                    if (rel_breed_name is not None) and (rel_breed_name in breed_image_dict_orig.keys()) and not (rel_breed_name == breed_name):
+                        related_breeds[rel_breed_name] = row[ind]
+            related_breeds_top_orig[breed_name] = related_breeds
+
+        breed_image_dict = copy.deepcopy(breed_image_dict_orig)
+        related_breeds_top = copy.deepcopy(related_breeds_top_orig)
+
+        # clean the related_breeds_top dict such that it only contains breeds which are available
+        for breed_name, breed_images in breed_image_dict.items():
+            if len(breed_image_dict[breed_name]) < 1:
+                for breed_name_rel in list(related_breeds_top[breed_name].keys()):
+                    related_breeds_top[breed_name_rel].pop(breed_name, None)
+                    related_breeds_top[breed_name].pop(breed_name_rel, None)
+
+        # 1) build pairs of dogs
+        set_of_breeds_with_at_least_2 = set()
+        for breed_name, breed_images in breed_image_dict.items():
+            if len(breed_images) >= 2:
+                set_of_breeds_with_at_least_2.add(breed_name)
+
+        n_unused_images = len(self.data_sampler_info['name_list'])
+        all_dog_duos = []
+        n_new_duos = 1
+        while n_new_duos > 0:
+            for breed_name, breed_images in breed_image_dict.items():
+                # shuffle the image list for this specific breed (this changes the dict)
+                random.shuffle(breed_images)
+            breed_list = list(related_breeds_top.keys())
+            random.shuffle(breed_list)
+            n_new_duos = 0
+            for breed_name in breed_list:
+                if len(breed_image_dict[breed_name]) >= 2:
+                    dog_a = breed_image_dict[breed_name].pop()
+                    dog_b = breed_image_dict[breed_name].pop()
+                    dog_duo = [dog_a, dog_b]
+                    all_dog_duos.append({'image_names': dog_duo})
+                    # clean the related_breeds_top dict such that it only contains breeds which are still available
+                    if len(breed_image_dict[breed_name]) < 1:
+                        for breed_name_rel in list(related_breeds_top[breed_name].keys()):
+                            related_breeds_top[breed_name_rel].pop(breed_name, None)
+                            related_breeds_top[breed_name].pop(breed_name_rel, None)
+                    n_new_duos += 1
+                    n_unused_images -= 2
+
+        image_name_to_ind = {}
+        for ind_img_name, img_name in enumerate(self.data_sampler_info['name_list']):
+            image_name_to_ind[img_name] = ind_img_name
+
+        # take all images and create the batches
+        n_avail_2 = len(all_dog_duos)
+        all_batches = []
+        ind_in_duos = 0
+        n_imgs_used_twice = 0
+        for ind_b in range(0, self.n_desired_batches):
+            batch_with_image_names = []
+            for ind in range(int(np.floor(self.batch_size / 2))):
+                if ind_in_duos >= n_avail_2:
+                    ind_rand = random.randint(0, n_avail_2 - 1)
+                    batch_with_image_names.extend(all_dog_duos[ind_rand]['image_names'])
+                    n_imgs_used_twice += 2
+                else:
+                    batch_with_image_names.extend(all_dog_duos[ind_in_duos]['image_names'])
+                    ind_in_duos += 1
+
+            batch_with_inds = []
+            for image_name in batch_with_image_names:  # rather a folder than a name
+                batch_with_inds.append(image_name_to_ind[image_name])
+
+            all_batches.append(batch_with_inds)
+
+        for batch in all_batches:
+            yield batch
+
+    def __len__(self):
+        # Since we are sampling pairs of dogs and not every breed has an even number of dogs,
+        # we cannot guarantee to show each dog exactly once. Instead, we return the same number
+        # of batches as a standard sampler which is not based on dog pairs would return.
+        return self.n_desired_batches
+
src/stacked_hourglass/datasets/stanext24.py ADDED
@@ -0,0 +1,301 @@
+# 24 joints instead of 20!!
+
+
+import gzip
+import json
+import os
+import random
+import math
+import numpy as np
+import torch
+import torch.utils.data as data
+from importlib_resources import open_binary
+from scipy.io import loadmat
+from tabulate import tabulate
+import itertools
+from scipy import ndimage
+
+from csv import DictReader
+from pycocotools.mask import decode as decode_RLE
+
+import sys
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
+from configs.data_info import COMPLETE_DATA_INFO_24
+from stacked_hourglass.utils.imutils import load_image, draw_labelmap, draw_multiple_labelmaps
+from stacked_hourglass.utils.misc import to_torch
+from stacked_hourglass.utils.transforms import shufflelr, crop, color_normalize, fliplr, transform
+import stacked_hourglass.datasets.utils_stanext as utils_stanext
+from stacked_hourglass.utils.visualization import save_input_image_with_keypoints
+from configs.dog_breeds.dog_breed_class import COMPLETE_ABBREV_DICT, COMPLETE_SUMMARY_BREEDS, SIM_MATRIX_RAW, SIM_ABBREV_INDICES
+from configs.dataset_path_configs import STANEXT_RELATED_DATA_ROOT_DIR
+
+
+class StanExt(data.Dataset):
+    DATA_INFO = COMPLETE_DATA_INFO_24
+
+    # Suggested joints to use for keypoint reprojection error calculations
+    ACC_JOINTS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16]
+
+    def __init__(self, image_path=None, is_train=True, inp_res=256, out_res=64, sigma=1,
+                 scale_factor=0.25, rot_factor=30, label_type='Gaussian',
+                 do_augment='default', shorten_dataset_to=None, dataset_mode='keyp_only', V12=None, val_opt='test'):
+        self.V12 = V12
+        self.is_train = is_train  # training set or test set
+        if do_augment == 'yes':
+            self.do_augment = True
+        elif do_augment == 'no':
+            self.do_augment = False
+        elif do_augment == 'default':
+            self.do_augment = self.is_train
+        else:
+            raise ValueError
+        self.inp_res = inp_res
+        self.out_res = out_res
+        self.sigma = sigma
+        self.scale_factor = scale_factor
+        self.rot_factor = rot_factor
+        self.label_type = label_type
+        self.dataset_mode = dataset_mode
+        if self.dataset_mode == 'complete' or self.dataset_mode == 'keyp_and_seg' or self.dataset_mode == 'keyp_and_seg_and_partseg':
+            self.calc_seg = True
+        else:
+            self.calc_seg = False
+        self.val_opt = val_opt
+
+        # create train/val split
+        self.img_folder = utils_stanext.get_img_dir(V12=self.V12)
+        self.train_dict, init_test_dict, init_val_dict = utils_stanext.load_stanext_json_as_dict(split_train_test=True, V12=self.V12)
+        self.train_name_list = list(self.train_dict.keys())  # 7004
+        if self.val_opt == 'test':
+            self.test_dict = init_test_dict
+            self.test_name_list = list(self.test_dict.keys())
+        elif self.val_opt == 'val':
+            self.test_dict = init_val_dict
+            self.test_name_list = list(self.test_dict.keys())
+        else:
+            raise NotImplementedError
+
+        # stanext breed dict (contains a stanext specific index for each name)
+        breed_json_path = os.path.join(STANEXT_RELATED_DATA_ROOT_DIR, 'StanExt_breed_dict_v2.json')
+        self.breed_dict = self.get_breed_dict(breed_json_path, create_new_breed_json=False)
+        self.train_name_list = sorted(self.train_name_list)
+        self.test_name_list = sorted(self.test_name_list)
+        random.seed(4)
+        random.shuffle(self.train_name_list)
+        random.shuffle(self.test_name_list)
+        if shorten_dataset_to is not None:
+            # sometimes it is useful to have a smaller set (validation speed, debugging)
+            self.train_name_list = self.train_name_list[0:min(len(self.train_name_list), shorten_dataset_to)]
+            self.test_name_list = self.test_name_list[0:min(len(self.test_name_list), shorten_dataset_to)]
+            # special case for debugging: 12 similar images
+            if shorten_dataset_to == 12:
+                my_sample = self.test_name_list[2]
+                for ind in range(0, 12):
+                    self.test_name_list[ind] = my_sample
+        print('len(dataset): ' + str(self.__len__()))
+
+        # add results for eyes, withers and throat as obtained through anipose -> they are used
+        # as pseudo ground truth at training time.
+        self.path_anipose_out_root = os.path.join(STANEXT_RELATED_DATA_ROOT_DIR, 'animalpose_hg8_v0_results_on_StanExt')
+
+    def get_data_sampler_info(self):
+        # for the custom data sampler
+        if self.is_train:
+            name_list = self.train_name_list
+        else:
+            name_list = self.test_name_list
+        info_dict = {'name_list': name_list,
+                     'stanext_breed_dict': self.breed_dict,
+                     'breeds_abbrev_dict': COMPLETE_ABBREV_DICT,
+                     'breeds_summary': COMPLETE_SUMMARY_BREEDS,
+                     'breeds_sim_martix_raw': SIM_MATRIX_RAW,
+                     'breeds_sim_abbrev_inds': SIM_ABBREV_INDICES}
+        return info_dict
+
+    def get_breed_dict(self, breed_json_path, create_new_breed_json=False):
+        if create_new_breed_json:
+            breed_dict = {}
+            breed_index = 0
+            for img_name in self.train_name_list:
+                folder_name = img_name.split('/')[0]
+                breed_name = folder_name.split(folder_name.split('-')[0] + '-')[1]
+                if not (folder_name in breed_dict):
+                    breed_dict[folder_name] = {
+                        'breed_name': breed_name,
+                        'index': breed_index}
+                    breed_index += 1
+            with open(breed_json_path, 'w', encoding='utf-8') as f:
+                json.dump(breed_dict, f, ensure_ascii=False, indent=4)
+        else:
+            with open(breed_json_path) as json_file:
+                breed_dict = json.load(json_file)
+        return breed_dict
+
+    def __getitem__(self, index):
+        if self.is_train:
+            name = self.train_name_list[index]
+            data = self.train_dict[name]
+        else:
+            name = self.test_name_list[index]
+            data = self.test_dict[name]
+
+        sf = self.scale_factor
+        rf = self.rot_factor
+
+        img_path = os.path.join(self.img_folder, data['img_path'])
+        try:
+            anipose_res_path = os.path.join(self.path_anipose_out_root, data['img_path'].replace('.jpg', '.json'))
+            with open(anipose_res_path) as f:
+                anipose_data = json.load(f)
+            anipose_thr = 0.2
+            anipose_joints_0to24 = np.asarray(anipose_data['anipose_joints_0to24']).reshape((-1, 3))
+            anipose_joints_0to24_scores = anipose_joints_0to24[:, 2]
+            anipose_joints_0to24_scores[anipose_joints_0to24_scores < anipose_thr] = 0.0
+            anipose_joints_0to24[:, 2] = anipose_joints_0to24_scores
+        except Exception:
+            # REMARK: this happens sometimes, maybe for every 10th image
+            # (no anipose eye keypoints available)
+            anipose_joints_0to24 = np.zeros((24, 3))
+
+        joints = np.concatenate((np.asarray(data['joints'])[:20, :], anipose_joints_0to24[20:24, :]), axis=0)
+        joints[joints[:, 2] == 0, :2] = 0  # avoid nan values
+        pts = torch.Tensor(joints)
+
+        # crop conventions (see crop() in the transforms):
+        #   inp = crop(img, c, s, [self.inp_res, self.inp_res], rot=r)
+        #   sf = scale * 200.0 / res[0]  # res[0] = 256
+        #   center = center * 1.0 / sf
+        #   scale = scale / sf = 256 / 200
+        #   h = 200 * scale
+        bbox_xywh = data['img_bbox']
+        bbox_c = [bbox_xywh[0] + 0.5 * bbox_xywh[2], bbox_xywh[1] + 0.5 * bbox_xywh[3]]
+        bbox_max = max(bbox_xywh[2], bbox_xywh[3])
+        bbox_diag = math.sqrt(bbox_xywh[2]**2 + bbox_xywh[3]**2)
+        # bbox_s = bbox_max / 200.   # the dog would fill the image -> bbox_max = 256
+        # bbox_s = bbox_diag / 200.  # the diagonal of the bounding box would be 200
+        bbox_s = bbox_max / 200. * 256. / 200.  # the maximum side of the bbox will be 200
+        c = torch.Tensor(bbox_c)
+        s = bbox_s
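A quick editorial check of the scale convention used just above (assuming the MPII-style crop, which the comments above suggest, where scale * 200 source pixels map onto the output window):

bbox_max = 320.0
s = bbox_max / 200. * 256. / 200.      # -> 2.048
window = 200. * s                      # -> 409.6 source pixels are cropped
print(256. * bbox_max / window)        # -> 200.0: the longer bbox side spans 200 px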
185
+
186
+ # For single-person pose estimation with a centered/scaled figure
187
+ nparts = pts.size(0)
188
+ img = load_image(img_path) # CxHxW
189
+
190
+ # segmentation map (we reshape it to 3xHxW, such that we can do the
191
+ # same transformations as with the image)
192
+ if self.calc_seg:
193
+ seg = torch.Tensor(utils_stanext.get_seg_from_entry(data)[None, :, :])
194
+ seg = torch.cat(3*[seg])
195
+
196
+ r = 0
197
+ do_flip = False
198
+ if self.do_augment:
199
+ s = s*torch.randn(1).mul_(sf).add_(1).clamp(1-sf, 1+sf)[0]
200
+ r = torch.randn(1).mul_(rf).clamp(-2*rf, 2*rf)[0] if random.random() <= 0.6 else 0
201
+ # Flip
202
+ if random.random() <= 0.5:
203
+ do_flip = True
204
+ img = fliplr(img)
205
+ if self.calc_seg:
206
+ seg = fliplr(seg)
207
+ pts = shufflelr(pts, img.size(2), self.DATA_INFO.hflip_indices)
208
+ c[0] = img.size(2) - c[0]
209
+ # Color
210
+ img[0, :, :].mul_(random.uniform(0.8, 1.2)).clamp_(0, 1)
211
+ img[1, :, :].mul_(random.uniform(0.8, 1.2)).clamp_(0, 1)
212
+ img[2, :, :].mul_(random.uniform(0.8, 1.2)).clamp_(0, 1)
213
+
214
+ # Prepare image and groundtruth map
215
+ inp = crop(img, c, s, [self.inp_res, self.inp_res], rot=r)
216
+ img_border_mask = torch.all(inp > 1.0/256, dim = 0).unsqueeze(0).float() # 1 is foreground
217
+ inp = color_normalize(inp, self.DATA_INFO.rgb_mean, self.DATA_INFO.rgb_stddev)
218
+ if self.calc_seg:
219
+ seg = crop(seg, c, s, [self.inp_res, self.inp_res], rot=r)
220
+
221
+ # Generate ground truth
222
+ tpts = pts.clone()
223
+ target_weight = tpts[:, 2].clone().view(nparts, 1)
224
+
225
+ target = torch.zeros(nparts, self.out_res, self.out_res)
226
+ for i in range(nparts):
227
+ # if tpts[i, 2] > 0: # (do not gate on the visibility flag here)
228
+ if tpts[i, 1] > 0:
229
+ tpts[i, 0:2] = to_torch(transform(tpts[i, 0:2]+1, c, s, [self.out_res, self.out_res], rot=r, as_int=False))
230
+ target[i], vis = draw_labelmap(target[i], tpts[i]-1, self.sigma, type=self.label_type)
231
+ target_weight[i, 0] *= vis
232
+ # NEW:
233
+ '''target_new, vis_new = draw_multiple_labelmaps((self.out_res, self.out_res), tpts[:, :2]-1, self.sigma, type=self.label_type)
234
+ target_weight_new = tpts[:, 2].clone().view(nparts, 1) * vis_new
235
+ target_new[(target_weight_new==0).reshape((-1)), :, :] = 0'''
236
+
237
+ # --- Meta info
238
+ this_breed = self.breed_dict[name.split('/')[0]] # one of the 120 breeds
239
+ # add information about location within breed similarity matrix
240
+ folder_name = name.split('/')[0]
241
+ breed_name = folder_name.split(folder_name.split('-')[0] + '-')[1]
242
+ abbrev = COMPLETE_ABBREV_DICT[breed_name]
243
+ try:
244
+ sim_breed_index = COMPLETE_SUMMARY_BREEDS[abbrev]._ind_in_xlsx_matrix
245
+ except (KeyError, AttributeError): # some breeds are not in the xlsx file
246
+ sim_breed_index = -1
247
+ meta = {'index' : index, 'center' : c, 'scale' : s,
248
+ 'pts' : pts, 'tpts' : tpts, 'target_weight': target_weight,
249
+ 'breed_index': this_breed['index'], 'sim_breed_index': sim_breed_index,
250
+ 'ind_dataset': 0} # ind_dataset=0 for stanext or stanexteasy or stanext 2
251
+ meta2 = {'index' : index, 'center' : c, 'scale' : s,
252
+ 'pts' : pts, 'tpts' : tpts, 'target_weight': target_weight,
253
+ 'ind_dataset': 3}
254
+
255
+ # return different things depending on dataset_mode
256
+ if self.dataset_mode=='keyp_only':
257
+ # save_input_image_with_keypoints(inp, meta['tpts'], out_path='./test_input_stanext.png', ratio_in_out=self.inp_res/self.out_res)
258
+ return inp, target, meta
259
+ elif self.dataset_mode=='keyp_and_seg':
260
+ meta['silh'] = seg[0, :, :]
261
+ meta['name'] = name
262
+ return inp, target, meta
263
+ elif self.dataset_mode=='keyp_and_seg_and_partseg':
264
+ # the part segmentation is a placeholder; it exists only so that this dataset can be combined with another dataset that does have part segmentations
265
+ meta2['silh'] = seg[0, :, :]
266
+ meta2['name'] = name
267
+ fake_body_part_matrix = torch.ones((3, 256, 256)).long() * (-1)
268
+ meta2['body_part_matrix'] = fake_body_part_matrix
269
+ return inp, target, meta2
270
+ elif self.dataset_mode=='complete':
271
+ target_dict = meta
272
+ target_dict['silh'] = seg[0, :, :]
273
+ # NEW for silhouette loss
274
+ target_dict['img_border_mask'] = img_border_mask
275
+ target_dict['has_seg'] = True
276
+ if target_dict['silh'].sum() < 1:
277
+ if ((not self.is_train) and self.val_opt == 'test'):
278
+ raise ValueError('test image without valid silhouette: ' + name)
279
+ elif self.is_train:
280
+ print('had to replace training image')
281
+ replacement_index = max(0, index - 1)
282
+ inp, target_dict = self.__getitem__(replacement_index)
283
+ else:
284
+ # A few validation images lack a segmentation mask, which
285
+ # would lead to NaN values in the IoU calculation.
286
+ replacement_index = max(0, index - 1)
287
+ inp, target_dict = self.__getitem__(replacement_index)
288
+ return inp, target_dict
289
+ else:
290
+ print('sampling error')
291
+ # fail fast instead of dropping into a debugger here
292
+ raise ValueError('unknown dataset_mode: ' + str(self.dataset_mode))
293
+
294
+
295
+ def __len__(self):
296
+ if self.is_train:
297
+ return len(self.train_name_list)
298
+ else:
299
+ return len(self.test_name_list)
300
+
301
+
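For reference, a minimal sketch (not part of the commit) of the bounding-box-to-center/scale arithmetic used in `__getitem__` above. It assumes the same convention as the `crop` helper, where the square crop has a side length of `200 * scale` source pixels; the helper name is hypothetical:

```python
# Hypothetical helper illustrating the center/scale convention above.
def bbox_to_center_scale(bbox_xywh, out_res=256):
    x, y, w, h = bbox_xywh
    center = (x + 0.5 * w, y + 0.5 * h)        # bbox center, as in the dataset code
    scale = max(w, h) / 200. * out_res / 200.  # same formula as bbox_s above
    crop_side = 200. * scale                   # side of the square crop in source pixels
    return center, scale, crop_side

center, scale, crop_side = bbox_to_center_scale([40., 60., 100., 160.])
print(center, scale, crop_side)  # (90.0, 140.0) 1.024 204.8
# The longer bbox side (160 px) maps to 160 / 204.8 * 256 = 200 of the 256 output pixels.
```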
src/stacked_hourglass/datasets/utils_stanext.py ADDED
@@ -0,0 +1,114 @@
1
+
2
+ import os
3
+ from matplotlib import pyplot as plt
4
+ import glob
5
+ import json
6
+ import numpy as np
7
+ from scipy.io import loadmat
8
+ from csv import DictReader
9
+ from collections import OrderedDict
10
+ from pycocotools.mask import decode as decode_RLE
11
+
12
+ import sys
13
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
14
+ from configs.dataset_path_configs import IMG_V12_DIR, JSON_V12_DIR, STAN_V12_TRAIN_LIST_DIR, STAN_V12_VAL_LIST_DIR, STAN_V12_TEST_LIST_DIR
15
+
16
+
17
+ def get_img_dir(V12):
18
+ if V12:
19
+ return IMG_V12_DIR
20
+ else:
21
+ return IMG_DIR # NOTE: the non-V12 IMG_DIR is not imported above and must be provided by dataset_path_configs
22
+
23
+ def get_seg_from_entry(entry):
24
+ """Given a .json entry, returns the binary mask as a numpy array"""
25
+ rle = {
26
+ "size": [entry['img_height'], entry['img_width']],
27
+ "counts": entry['seg']}
28
+ decoded = decode_RLE(rle)
29
+ return decoded
30
+
31
+ def full_animal_visible(seg_data):
32
+ if seg_data[0, :].sum() == 0 and seg_data[seg_data.shape[0]-1, :].sum() == 0 and seg_data[:, 0].sum() == 0 and seg_data[:, seg_data.shape[1]-1].sum() == 0:
33
+ return True
34
+ else:
35
+ return False
36
+
37
+ def load_train_and_test_lists(train_list_dir=None, test_list_dir=None):
38
+ """ returns sets containing names such as 'n02085620-Chihuahua/n02085620_5927.jpg' """
39
+ # train data
40
+ train_list_mat = loadmat(train_list_dir)
41
+ train_list = []
42
+ for ind in range(0, train_list_mat['file_list'].shape[0]):
43
+ name = train_list_mat['file_list'][ind, 0][0]
44
+ train_list.append(name)
45
+ # test data
46
+ test_list_mat = loadmat(test_list_dir)
47
+ test_list = []
48
+ for ind in range(0, test_list_mat['file_list'].shape[0]):
49
+ name = test_list_mat['file_list'][ind, 0][0]
50
+ test_list.append(name)
51
+ return train_list, test_list
52
+
53
+
54
+
55
+ def _filter_dict(t_list, j_dict, n_kp_min=4):
56
+ """ should only be used by load_stanext_json_as_dict() """
57
+ out_dict = {}
58
+ for sample in t_list:
59
+ if sample in j_dict.keys():
60
+ n_kp = np.asarray(j_dict[sample]['joints'])[:, 2].sum()
61
+ if n_kp >= n_kp_min:
62
+ out_dict[sample] = j_dict[sample]
63
+ return out_dict
64
+
65
+ def load_stanext_json_as_dict(split_train_test=True, V12=True):
66
+ # load json into memory
67
+ if V12:
68
+ with open(JSON_V12_DIR) as infile:
69
+ json_data = json.load(infile)
70
+ # with open(JSON_V12_DIR) as infile: json_data = json.load(infile, object_pairs_hook=OrderedDict)
71
+ else:
72
+ with open(JSON_DIR) as infile: # NOTE: the non-V12 JSON_DIR is not imported above
73
+ json_data = json.load(infile)
74
+ # convert json data to a dictionary of img_path : all_data, for easy lookup
75
+ json_dict = {i['img_path']: i for i in json_data}
76
+ if split_train_test:
77
+ if V12:
78
+ train_list_numbers = np.load(STAN_V12_TRAIN_LIST_DIR)
79
+ val_list_numbers = np.load(STAN_V12_VAL_LIST_DIR)
80
+ test_list_numbers = np.load(STAN_V12_TEST_LIST_DIR)
81
+ train_list = [json_data[i]['img_path'] for i in train_list_numbers]
82
+ val_list = [json_data[i]['img_path'] for i in val_list_numbers]
83
+ test_list = [json_data[i]['img_path'] for i in test_list_numbers]
84
+ train_dict = _filter_dict(train_list, json_dict, n_kp_min=4)
85
+ val_dict = _filter_dict(val_list, json_dict, n_kp_min=4)
86
+ test_dict = _filter_dict(test_list, json_dict, n_kp_min=4)
87
+ return train_dict, test_dict, val_dict
88
+ else:
89
+ train_list, test_list = load_train_and_test_lists(train_list_dir=STAN_ORIG_TRAIN_LIST_DIR, test_list_dir=STAN_ORIG_TEST_LIST_DIR) # NOTE: non-V12 list paths, not imported above
90
+ train_dict = _filter_dict(train_list, json_dict)
91
+ test_dict = _filter_dict(test_list, json_dict)
92
+ return train_dict, test_dict, None
93
+ else:
94
+ return json_dict
95
+
96
+ def get_dog(json_dict, name, img_dir=None): # (json_dict, name, img_dir=IMG_DIR)
97
+ """ takes the name of a dog, and loads in all the relevant information as a dictionary:
98
+ dict_keys(['img_path', 'img_width', 'img_height', 'joints', 'img_bbox',
99
+ 'is_multiple_dogs', 'seg', 'img_data', 'seg_data'])
100
+ img_bbox: [x0, y0, width, height] """
101
+ data = json_dict[name]
102
+ # load img
103
+ img_data = plt.imread(os.path.join(img_dir, data['img_path']))
104
+ # load seg
105
+ seg_data = get_seg_from_entry(data)
106
+ # add to output
107
+ data['img_data'] = img_data # 0 to 255
108
+ data['seg_data'] = seg_data # 0: bg, 1: fg
109
+ return data
110
+
111
+
112
+
113
+
114
+
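A small usage sketch for `get_seg_from_entry` (the entry below is synthetic; real entries come from the StanExt json files):

```python
import numpy as np
from pycocotools import mask as mask_util

# Build a tiny mask and RLE-encode it the way the dataset json stores 'seg'.
h, w = 4, 5
gt_mask = np.zeros((h, w), dtype=np.uint8, order='F')  # pycocotools expects Fortran order
gt_mask[1:3, 1:4] = 1
rle = mask_util.encode(gt_mask)

# Decode exactly as get_seg_from_entry does.
entry = {'img_height': h, 'img_width': w, 'seg': rle['counts']}
decoded = mask_util.decode({'size': [entry['img_height'], entry['img_width']],
                            'counts': entry['seg']})
assert (decoded == gt_mask).all()  # 0: background, 1: foreground
```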
src/stacked_hourglass/model.py ADDED
@@ -0,0 +1,308 @@
1
+ # Modified from:
2
+ # https://github.com/anibali/pytorch-stacked-hourglass
3
+ # https://github.com/bearpaw/pytorch-pose
4
+ # Hourglass network inserted in the pre-activated Resnet
5
+ # Use lr=0.01 for current version
6
+ # (c) YANG, Wei
7
+
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+ import torch.nn.functional as F
12
+ from torch.hub import load_state_dict_from_url
13
+
14
+
15
+ __all__ = ['HourglassNet', 'hg']
16
+
17
+
18
+ model_urls = {
19
+ 'hg1': 'https://github.com/anibali/pytorch-stacked-hourglass/releases/download/v0.0.0/bearpaw_hg1-ce125879.pth',
20
+ 'hg2': 'https://github.com/anibali/pytorch-stacked-hourglass/releases/download/v0.0.0/bearpaw_hg2-15e342d9.pth',
21
+ 'hg8': 'https://github.com/anibali/pytorch-stacked-hourglass/releases/download/v0.0.0/bearpaw_hg8-90e5d470.pth',
22
+ }
23
+
24
+
25
+ class Bottleneck(nn.Module):
26
+ expansion = 2
27
+
28
+ def __init__(self, inplanes, planes, stride=1, downsample=None):
29
+ super(Bottleneck, self).__init__()
30
+
31
+ self.bn1 = nn.BatchNorm2d(inplanes)
32
+ self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=True)
33
+ self.bn2 = nn.BatchNorm2d(planes)
34
+ self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
35
+ padding=1, bias=True)
36
+ self.bn3 = nn.BatchNorm2d(planes)
37
+ self.conv3 = nn.Conv2d(planes, planes * 2, kernel_size=1, bias=True)
38
+ self.relu = nn.ReLU(inplace=True)
39
+ self.downsample = downsample
40
+ self.stride = stride
41
+
42
+ def forward(self, x):
43
+ residual = x
44
+
45
+ out = self.bn1(x)
46
+ out = self.relu(out)
47
+ out = self.conv1(out)
48
+
49
+ out = self.bn2(out)
50
+ out = self.relu(out)
51
+ out = self.conv2(out)
52
+
53
+ out = self.bn3(out)
54
+ out = self.relu(out)
55
+ out = self.conv3(out)
56
+
57
+ if self.downsample is not None:
58
+ residual = self.downsample(x)
59
+
60
+ out += residual
61
+
62
+ return out
63
+
64
+
65
+ class Hourglass(nn.Module):
66
+ def __init__(self, block, num_blocks, planes, depth):
67
+ super(Hourglass, self).__init__()
68
+ self.depth = depth
69
+ self.block = block
70
+ self.hg = self._make_hour_glass(block, num_blocks, planes, depth)
71
+
72
+ def _make_residual(self, block, num_blocks, planes):
73
+ layers = []
74
+ for i in range(0, num_blocks):
75
+ layers.append(block(planes*block.expansion, planes))
76
+ return nn.Sequential(*layers)
77
+
78
+ def _make_hour_glass(self, block, num_blocks, planes, depth):
79
+ hg = []
80
+ for i in range(depth):
81
+ res = []
82
+ for j in range(3):
83
+ res.append(self._make_residual(block, num_blocks, planes))
84
+ if i == 0:
85
+ res.append(self._make_residual(block, num_blocks, planes))
86
+ hg.append(nn.ModuleList(res))
87
+ return nn.ModuleList(hg)
88
+
89
+ def _hour_glass_forward(self, n, x):
90
+ up1 = self.hg[n-1][0](x)
91
+ low1 = F.max_pool2d(x, 2, stride=2)
92
+ low1 = self.hg[n-1][1](low1)
93
+
94
+ if n > 1:
95
+ low2 = self._hour_glass_forward(n-1, low1)
96
+ else:
97
+ low2 = self.hg[n-1][3](low1)
98
+ low3 = self.hg[n-1][2](low2)
99
+ up2 = F.interpolate(low3, scale_factor=2)
100
+ out = up1 + up2
101
+ return out
102
+
103
+ def forward(self, x):
104
+ return self._hour_glass_forward(self.depth, x)
105
+
106
+
107
+ class HourglassNet(nn.Module):
108
+ '''Hourglass model from Newell et al ECCV 2016'''
109
+ def __init__(self, block, num_stacks=2, num_blocks=4, num_classes=16, upsample_seg=False, add_partseg=False, num_partseg=None):
110
+ super(HourglassNet, self).__init__()
111
+
112
+ self.inplanes = 64
113
+ self.num_feats = 128
114
+ self.num_stacks = num_stacks
115
+ self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
116
+ bias=True)
117
+ self.bn1 = nn.BatchNorm2d(self.inplanes)
118
+ self.relu = nn.ReLU(inplace=True)
119
+ self.layer1 = self._make_residual(block, self.inplanes, 1)
120
+ self.layer2 = self._make_residual(block, self.inplanes, 1)
121
+ self.layer3 = self._make_residual(block, self.num_feats, 1)
122
+ self.maxpool = nn.MaxPool2d(2, stride=2)
123
+ self.upsample_seg = upsample_seg
124
+ self.add_partseg = add_partseg
125
+
126
+ # build hourglass modules
127
+ ch = self.num_feats*block.expansion
128
+ hg, res, fc, score, fc_, score_ = [], [], [], [], [], []
129
+ for i in range(num_stacks):
130
+ hg.append(Hourglass(block, num_blocks, self.num_feats, 4))
131
+ res.append(self._make_residual(block, self.num_feats, num_blocks))
132
+ fc.append(self._make_fc(ch, ch))
133
+ score.append(nn.Conv2d(ch, num_classes, kernel_size=1, bias=True))
134
+ if i < num_stacks-1:
135
+ fc_.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))
136
+ score_.append(nn.Conv2d(num_classes, ch, kernel_size=1, bias=True))
137
+ self.hg = nn.ModuleList(hg)
138
+ self.res = nn.ModuleList(res)
139
+ self.fc = nn.ModuleList(fc)
140
+ self.score = nn.ModuleList(score)
141
+ self.fc_ = nn.ModuleList(fc_)
142
+ self.score_ = nn.ModuleList(score_)
143
+
144
+ if self.add_partseg:
145
+ self.hg_ps = (Hourglass(block, num_blocks, self.num_feats, 4))
146
+ self.res_ps = (self._make_residual(block, self.num_feats, num_blocks))
147
+ self.fc_ps = (self._make_fc(ch, ch))
148
+ self.score_ps = (nn.Conv2d(ch, num_partseg, kernel_size=1, bias=True))
149
+ self.ups_upsampling_ps = nn.Upsample(scale_factor=4, mode='bilinear', align_corners=True)
150
+
151
+
152
+ if self.upsample_seg:
153
+ self.ups_upsampling = nn.Upsample(scale_factor=4, mode='bilinear', align_corners=True)
154
+ self.ups_conv0 = nn.Conv2d(3, 32, kernel_size=7, stride=1, padding=3,
155
+ bias=True)
156
+ self.ups_bn1 = nn.BatchNorm2d(32)
157
+ self.ups_conv1 = nn.Conv2d(32, 16, kernel_size=7, stride=1, padding=3,
158
+ bias=True)
159
+ self.ups_bn2 = nn.BatchNorm2d(16+2)
160
+ self.ups_conv2 = nn.Conv2d(16+2, 16, kernel_size=5, stride=1, padding=2,
161
+ bias=True)
162
+ self.ups_bn3 = nn.BatchNorm2d(16)
163
+ self.ups_conv3 = nn.Conv2d(16, 2, kernel_size=5, stride=1, padding=2,
164
+ bias=True)
165
+
166
+
167
+
168
+ def _make_residual(self, block, planes, blocks, stride=1):
169
+ downsample = None
170
+ if stride != 1 or self.inplanes != planes * block.expansion:
171
+ downsample = nn.Sequential(
172
+ nn.Conv2d(self.inplanes, planes * block.expansion,
173
+ kernel_size=1, stride=stride, bias=True),
174
+ )
175
+
176
+ layers = []
177
+ layers.append(block(self.inplanes, planes, stride, downsample))
178
+ self.inplanes = planes * block.expansion
179
+ for i in range(1, blocks):
180
+ layers.append(block(self.inplanes, planes))
181
+
182
+ return nn.Sequential(*layers)
183
+
184
+ def _make_fc(self, inplanes, outplanes):
185
+ bn = nn.BatchNorm2d(inplanes)
186
+ conv = nn.Conv2d(inplanes, outplanes, kernel_size=1, bias=True)
187
+ return nn.Sequential(
188
+ conv,
189
+ bn,
190
+ self.relu,
191
+ )
192
+
193
+ def forward(self, x_in):
194
+ out = []
195
+ out_seg = []
196
+ out_partseg = []
197
+ x = self.conv1(x_in)
198
+ x = self.bn1(x)
199
+ x = self.relu(x)
200
+
201
+ x = self.layer1(x)
202
+ x = self.maxpool(x)
203
+ x = self.layer2(x)
204
+ x = self.layer3(x)
205
+
206
+ for i in range(self.num_stacks):
207
+ if i == self.num_stacks - 1:
208
+ if self.add_partseg:
209
+ y_ps = self.hg_ps(x)
210
+ y_ps = self.res_ps(y_ps)
211
+ y_ps = self.fc_ps(y_ps)
212
+ score_ps = self.score_ps(y_ps)
213
+ out_partseg.append(score_ps[:, :, :, :])
214
+ y = self.hg[i](x)
215
+ y = self.res[i](y)
216
+ y = self.fc[i](y)
217
+ score = self.score[i](y)
218
+ if self.upsample_seg:
219
+ out.append(score[:, :-2, :, :])
220
+ out_seg.append(score[:, -2:, :, :])
221
+ else:
222
+ out.append(score)
223
+ if i < self.num_stacks-1:
224
+ fc_ = self.fc_[i](y)
225
+ score_ = self.score_[i](score)
226
+ x = x + fc_ + score_
227
+
228
+ if self.upsample_seg:
229
+ # PLAN: add a residual to the upsampled version of the segmentation image
230
+ # upsample predicted segmentation
231
+ seg_score = score[:, -2:, :, :]
232
+ seg_score_256 = self.ups_upsampling(seg_score)
233
+ # prepare input image
234
+
235
+ ups_img = self.ups_conv0(x_in)
236
+
237
+ ups_img = self.ups_bn1(ups_img)
238
+ ups_img = self.relu(ups_img)
239
+ ups_img = self.ups_conv1(ups_img)
240
+
241
+
242
+
243
+ ups_conc = torch.cat((seg_score_256, ups_img), 1)
244
+
245
+ # ups_conc = self.ups_bn2(ups_conc)
246
+ ups_conc = self.relu(ups_conc)
247
+ ups_conc = self.ups_conv2(ups_conc)
248
+
249
+ ups_conc = self.ups_bn3(ups_conc)
250
+ ups_conc = self.relu(ups_conc)
251
+ correction = self.ups_conv3(ups_conc)
252
+
253
+ seg_final = seg_score_256 + correction
254
+
255
+ if self.add_partseg:
256
+ partseg_final = self.ups_upsampling_ps(score_ps)
257
+ out_dict = {'out_list_kp': out,
258
+ 'out_list_seg': out, # NOTE: the separately collected out_seg list is not returned; downstream code appears to rely on seg_final
259
+ 'seg_final': seg_final,
260
+ 'out_list_partseg': out_partseg,
261
+ 'partseg_final': partseg_final
262
+ }
263
+ return out_dict
264
+ else:
265
+ out_dict = {'out_list_kp': out,
266
+ 'out_list_seg': out, # see note above; out_seg is unused
267
+ 'seg_final': seg_final
268
+ }
269
+ return out_dict
270
+
271
+ return out
272
+
273
+
274
+ def hg(**kwargs):
275
+ model = HourglassNet(Bottleneck, num_stacks=kwargs['num_stacks'], num_blocks=kwargs['num_blocks'],
276
+ num_classes=kwargs['num_classes'], upsample_seg=kwargs['upsample_seg'],
277
+ add_partseg=kwargs['add_partseg'], num_partseg=kwargs['num_partseg'])
278
+ return model
279
+
280
+
281
+ def _hg(arch, pretrained, progress, **kwargs):
282
+ model = hg(**kwargs)
283
+ if pretrained:
284
+ state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
285
+ model.load_state_dict(state_dict)
286
+ return model
287
+
288
+
289
+ def hg1(pretrained=False, progress=True, num_blocks=1, num_classes=16, upsample_seg=False, add_partseg=False, num_partseg=None):
290
+ return _hg('hg1', pretrained, progress, num_stacks=1, num_blocks=num_blocks,
291
+ num_classes=num_classes, upsample_seg=upsample_seg,
292
+ add_partseg=add_partseg, num_partseg=num_partseg)
293
+
294
+
295
+ def hg2(pretrained=False, progress=True, num_blocks=1, num_classes=16, upsample_seg=False, add_partseg=False, num_partseg=None):
296
+ return _hg('hg2', pretrained, progress, num_stacks=2, num_blocks=num_blocks,
297
+ num_classes=num_classes, upsample_seg=upsample_seg,
298
+ add_partseg=add_partseg, num_partseg=num_partseg)
299
+
300
+ def hg4(pretrained=False, progress=True, num_blocks=1, num_classes=16, upsample_seg=False, add_partseg=False, num_partseg=None):
301
+ return _hg('hg4', pretrained, progress, num_stacks=4, num_blocks=num_blocks,
302
+ num_classes=num_classes, upsample_seg=upsample_seg,
303
+ add_partseg=add_partseg, num_partseg=num_partseg)
304
+
305
+ def hg8(pretrained=False, progress=True, num_blocks=1, num_classes=16, upsample_seg=False, add_partseg=False, num_partseg=None):
306
+ return _hg('hg8', pretrained, progress, num_stacks=8, num_blocks=num_blocks,
307
+ num_classes=num_classes, upsample_seg=upsample_seg,
308
+ add_partseg=add_partseg, num_partseg=num_partseg)
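A quick shape check for the factory functions above (a sketch; it assumes `src/` is on `sys.path` so that `stacked_hourglass` is importable, and note that pretrained urls exist only for the 16-class hg1/hg2/hg8):

```python
import torch
from stacked_hourglass.model import hg2

# Without upsample_seg/add_partseg, forward() returns one heatmap tensor per stack.
model = hg2(pretrained=False, num_classes=24, upsample_seg=False,
            add_partseg=False, num_partseg=None)
model.eval()
with torch.no_grad():
    out = model(torch.zeros(1, 3, 256, 256))
print(len(out), out[0].shape)  # 2 torch.Size([1, 24, 64, 64]) -- 1/4 input resolution
```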
src/stacked_hourglass/predictor.py ADDED
@@ -0,0 +1,119 @@
1
+
2
+ # Modified from:
3
+ # https://github.com/anibali/pytorch-stacked-hourglass
4
+ # https://github.com/bearpaw/pytorch-pose
5
+
6
+ import torch
7
+ from stacked_hourglass.utils.evaluation import final_preds_untransformed
8
+ from stacked_hourglass.utils.imfit import fit, calculate_fit_contain_output_area
9
+ from stacked_hourglass.utils.transforms import color_normalize, fliplr, flip_back
10
+
11
+
12
+ def _check_batched(images):
13
+ if isinstance(images, (tuple, list)):
14
+ return True
15
+ if images.ndimension() == 4:
16
+ return True
17
+ return False
18
+
19
+
20
+ class HumanPosePredictor:
21
+ def __init__(self, model, device=None, data_info=None, input_shape=None):
22
+ """Helper class for predicting 2D human pose joint locations.
23
+
24
+ Args:
25
+ model: The model for generating joint heatmaps.
26
+ device: The computational device to use for inference.
27
+ data_info: Specifications of the data (defaults to ``Mpii.DATA_INFO``).
28
+ input_shape: The input dimensions of the model (height, width).
29
+ """
30
+ if device is None:
31
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
32
+ device = torch.device(device)
33
+ model.to(device)
34
+ self.model = model
35
+ self.device = device
36
+
37
+ if data_info is None:
38
+ raise ValueError
39
+ # self.data_info = Mpii.DATA_INFO
40
+ else:
41
+ self.data_info = data_info
42
+
43
+ # Input shape ordering: H, W
44
+ if input_shape is None:
45
+ self.input_shape = (256, 256)
46
+ elif isinstance(input_shape, int):
47
+ self.input_shape = (input_shape, input_shape)
48
+ else:
49
+ self.input_shape = input_shape
50
+
51
+ def do_forward(self, input_tensor):
52
+ self.model.eval()
53
+ with torch.no_grad():
54
+ output = self.model(input_tensor)
55
+ return output
56
+
57
+ def prepare_image(self, image):
58
+ was_fixed_point = not image.is_floating_point()
59
+ image = torch.empty_like(image, dtype=torch.float32).copy_(image)
60
+ if was_fixed_point:
61
+ image /= 255.0
62
+ if image.shape[-2:] != self.input_shape:
63
+ image = fit(image, self.input_shape, fit_mode='contain')
64
+ image = color_normalize(image, self.data_info.rgb_mean, self.data_info.rgb_stddev)
65
+ return image
66
+
67
+ def estimate_heatmaps(self, images, flip=False):
68
+ is_batched = _check_batched(images)
69
+ raw_images = images if is_batched else images.unsqueeze(0)
70
+ input_tensor = torch.empty((len(raw_images), 3, *self.input_shape),
71
+ device=self.device, dtype=torch.float32)
72
+ for i, raw_image in enumerate(raw_images):
73
+ input_tensor[i] = self.prepare_image(raw_image)
74
+ heatmaps = self.do_forward(input_tensor)[-1].cpu()
75
+ if flip:
76
+ flip_input = fliplr(input_tensor)
77
+ flip_heatmaps = self.do_forward(flip_input)[-1].cpu()
78
+ heatmaps += flip_back(flip_heatmaps, self.data_info.hflip_indices)
79
+ heatmaps /= 2
80
+ if is_batched:
81
+ return heatmaps
82
+ else:
83
+ return heatmaps[0]
84
+
85
+ def estimate_joints(self, images, flip=False):
86
+ """Estimate human joint locations from input images.
87
+
88
+ Images are expected to be centred on a human subject and scaled reasonably.
89
+
90
+ Args:
91
+ images: The images to estimate joint locations for. Can be a single image or a list
92
+ of images.
93
+ flip (bool): If set to true, evaluates on flipped versions of the images as well and
94
+ averages the results.
95
+
96
+ Returns:
97
+ The predicted human joint locations in image pixel space.
98
+ """
99
+ is_batched = _check_batched(images)
100
+ raw_images = images if is_batched else images.unsqueeze(0)
101
+ heatmaps = self.estimate_heatmaps(raw_images, flip=flip).cpu()
102
+ # final_preds_untransformed compares the first component of shape with x and second with y
103
+ # This relates to the image Width, Height (Heatmap has shape Height, Width)
104
+ coords = final_preds_untransformed(heatmaps, heatmaps.shape[-2:][::-1])
105
+ # Rescale coords to pixel space of specified images.
106
+ for i, image in enumerate(raw_images):
107
+ # When returning to original image space we need to compensate for the fact that we
108
+ # used fit_mode='contain' when preparing the images for inference.
109
+ y_off, x_off, height, width = calculate_fit_contain_output_area(*image.shape[-2:], *self.input_shape)
110
+ coords[i, :, 1] *= self.input_shape[-2] / heatmaps.shape[-2]
111
+ coords[i, :, 1] -= y_off
112
+ coords[i, :, 1] *= image.shape[-2] / height
113
+ coords[i, :, 0] *= self.input_shape[-1] / heatmaps.shape[-1]
114
+ coords[i, :, 0] -= x_off
115
+ coords[i, :, 0] *= image.shape[-1] / width
116
+ if is_batched:
117
+ return coords
118
+ else:
119
+ return coords[0]
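A usage sketch for `HumanPosePredictor` (the `data_info` fields below are placeholders for this sketch; the real statistics and flip indices live in this repo's configs):

```python
from types import SimpleNamespace
import torch
from stacked_hourglass.model import hg2
from stacked_hourglass.predictor import HumanPosePredictor

data_info = SimpleNamespace(
    rgb_mean=[0.5, 0.5, 0.5],       # placeholder statistics
    rgb_stddev=[0.25, 0.25, 0.25],
    hflip_indices=list(range(24)),  # identity flip map, for this sketch only
)
model = hg2(pretrained=False, num_classes=24, upsample_seg=False,
            add_partseg=False, num_partseg=None)
predictor = HumanPosePredictor(model, device='cpu', data_info=data_info)

image = torch.randint(0, 256, (3, 480, 640), dtype=torch.uint8)  # C x H x W image
joints = predictor.estimate_joints(image, flip=False)
print(joints.shape)  # torch.Size([24, 2]), pixel coordinates in the original image
```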
src/stacked_hourglass/utils/__init__.py ADDED
File without changes
src/stacked_hourglass/utils/evaluation.py ADDED
@@ -0,0 +1,188 @@
1
+ # Modified from:
2
+ # https://github.com/anibali/pytorch-stacked-hourglass
3
+ # https://github.com/bearpaw/pytorch-pose
4
+
5
+ import math
6
+ import torch
7
+ from kornia.geometry.subpix import dsnt # kornia 0.4.0
8
+ import torch.nn.functional as F
9
+ from .transforms import transform_preds
10
+
11
+ __all__ = ['get_preds', 'get_preds_soft', 'calc_dists', 'dist_acc', 'accuracy', 'final_preds_untransformed',
12
+ 'final_preds', 'AverageMeter']
13
+
14
+ def get_preds(scores, return_maxval=False):
15
+ ''' get predictions from score maps in torch Tensor
16
+ return type: torch.FloatTensor (1-based coordinates)
17
+ '''
18
+ assert scores.dim() == 4, 'Score maps should be 4-dim'
19
+ maxval, idx = torch.max(scores.view(scores.size(0), scores.size(1), -1), 2)
20
+
21
+ maxval = maxval.view(scores.size(0), scores.size(1), 1)
22
+ idx = idx.view(scores.size(0), scores.size(1), 1) + 1
23
+
24
+ preds = idx.repeat(1, 1, 2).float()
25
+
26
+ preds[:,:,0] = (preds[:,:,0] - 1) % scores.size(3) + 1
27
+ preds[:,:,1] = torch.floor((preds[:,:,1] - 1) / scores.size(3)) + 1
28
+
29
+ pred_mask = maxval.gt(0).repeat(1, 1, 2).float() # values > 0
30
+ preds *= pred_mask
31
+ if return_maxval:
32
+ return preds, maxval
33
+ else:
34
+ return preds
35
+
36
+
37
+ def get_preds_soft(scores, return_maxval=False, norm_coords=False, norm_and_unnorm_coords=False):
38
+ ''' get predictions from score maps in torch Tensor
39
+ predictions are made assuming a logit output map
40
+ return type: torch.FloatTensor
41
+ '''
42
+
43
+ # New: work on logit predictions
44
+ scores_norm = dsnt.spatial_softmax2d(scores, temperature=torch.tensor(1))
45
+ # maxval_norm, idx_norm = torch.max(scores_norm.view(scores.size(0), scores.size(1), -1), 2)
46
+ # from unnormalized to normalized see:
47
+ # from -1to1 to 0to64
48
+ # see https://github.com/kornia/kornia/blob/b9ffe7efcba7399daeeb8028f10c22941b55d32d/kornia/utils/grid.py#L7 (line 40)
49
+ # xs = (xs / (width - 1) - 0.5) * 2
50
+ # ys = (ys / (height - 1) - 0.5) * 2
51
+
52
+ device = scores.device
53
+
54
+ if return_maxval:
55
+ preds_normalized = dsnt.spatial_expectation2d(scores_norm, normalized_coordinates=True)
56
+ # grid_sample(input, grid, mode='bilinear', padding_mode='zeros')
57
+ gs_input_single = scores_norm.reshape((-1, 1, scores_norm.shape[2], scores_norm.shape[3])) # (batch*n_joints, 1, 64, 64)
58
+
59
+
60
+ half_pad = 2
61
+ gs_input_single_padded = F.pad(input=gs_input_single, pad=(half_pad, half_pad, half_pad, half_pad, 0, 0, 0, 0), mode='constant', value=0)
62
+ gs_input_all = torch.zeros((gs_input_single.shape[0], 9, gs_input_single.shape[2], gs_input_single.shape[3])).to(device)
63
+ ind_tot = 0
64
+ for ind0 in [-1, 0, 1]:
65
+ for ind1 in [-1, 0, 1]:
66
+ gs_input_all[:, ind_tot, :, :] = gs_input_single_padded[:, 0, half_pad+ind0:-half_pad+ind0, half_pad+ind1:-half_pad+ind1]
67
+ ind_tot += 1
68
+
69
+ gs_grid = preds_normalized.reshape((-1, 2))[:, None, None, :] # (120, 1, 1, 2)
70
+ gs_output_all = F.grid_sample(gs_input_all, gs_grid, mode='nearest', padding_mode='zeros', align_corners=True).reshape((gs_input_all.shape[0], gs_input_all.shape[1], 1))
71
+ gs_output = gs_output_all.sum(axis=1)
72
+ # scores_norm[0, :, :, :].max(axis=2)[0].max(axis=1)[0]
73
+ # gs_output[0, :, 0]
74
+ gs_output_resh = gs_output.reshape((scores_norm.shape[0], scores_norm.shape[1], 1))
75
+
76
+ if norm_and_unnorm_coords:
77
+ preds = dsnt.spatial_expectation2d(scores_norm, normalized_coordinates=False) + 1
78
+ return preds, preds_normalized, gs_output_resh
79
+ elif norm_coords:
80
+ return preds_normalized, gs_output_resh
81
+ else:
82
+ preds = dsnt.spatial_expectation2d(scores_norm, normalized_coordinates=False) + 1
83
+ return preds, gs_output_resh
84
+ else:
85
+ if norm_coords:
86
+ preds_normalized = dsnt.spatial_expectation2d(scores_norm, normalized_coordinates=True)
87
+ return preds_normalized
88
+ else:
89
+ preds = dsnt.spatial_expectation2d(scores_norm, normalized_coordinates=False) + 1
90
+ return preds
91
+
92
+
93
+ def calc_dists(preds, target, normalize):
94
+ preds = preds.float()
95
+ target = target.float()
96
+ dists = torch.zeros(preds.size(1), preds.size(0))
97
+ for n in range(preds.size(0)):
98
+ for c in range(preds.size(1)):
99
+ if target[n,c,0] > 1 and target[n, c, 1] > 1:
100
+ dists[c, n] = torch.dist(preds[n,c,:], target[n,c,:])/normalize[n]
101
+ else:
102
+ dists[c, n] = -1
103
+ return dists
104
+
105
+ def dist_acc(dist, thr=0.5):
106
+ ''' Return percentage below threshold while ignoring values with a -1 '''
107
+ dist = dist[dist != -1]
108
+ if len(dist) > 0:
109
+ return 1.0 * (dist < thr).sum().item() / len(dist)
110
+ else:
111
+ return -1
112
+
113
+ def accuracy(output, target, idxs=None, thr=0.5):
114
+ ''' Calculate accuracy according to PCK, but uses ground truth heatmap rather than x,y locations
115
+ First value to be returned is average accuracy across 'idxs', followed by individual accuracies
116
+ '''
117
+ if idxs is None:
118
+ idxs = list(range(target.shape[-3]))
119
+ preds = get_preds_soft(output) # get_preds(output)
120
+ gts = get_preds(target)
121
+ norm = torch.ones(preds.size(0))*output.size(3)/10
122
+ dists = calc_dists(preds, gts, norm)
123
+
124
+ acc = torch.zeros(len(idxs)+1)
125
+ avg_acc = 0
126
+ cnt = 0
127
+
128
+ for i in range(len(idxs)):
129
+ acc[i+1] = dist_acc(dists[idxs[i]], thr=thr)
130
+ if acc[i+1] >= 0:
131
+ avg_acc = avg_acc + acc[i+1]
132
+ cnt += 1
133
+
134
+ if cnt != 0:
135
+ acc[0] = avg_acc / cnt
136
+ return acc
137
+
138
+ def final_preds_untransformed(output, res):
139
+ coords = get_preds_soft(output) # get_preds(output) # float type
140
+
141
+ # pose-processing
142
+ for n in range(coords.size(0)):
143
+ for p in range(coords.size(1)):
144
+ hm = output[n][p]
145
+ px = int(math.floor(coords[n][p][0]))
146
+ py = int(math.floor(coords[n][p][1]))
147
+ if px > 1 and px < res[0] and py > 1 and py < res[1]:
148
+ diff = torch.Tensor([hm[py - 1][px] - hm[py - 1][px - 2], hm[py][px - 1]-hm[py - 2][px - 1]])
149
+ coords[n][p] += diff.sign() * .25
150
+ coords += 0.5
151
+
152
+ if coords.dim() < 3:
153
+ coords = coords.unsqueeze(0)
154
+
155
+ coords -= 1 # Convert from 1-based to 0-based coordinates
156
+
157
+ return coords
158
+
159
+ def final_preds(output, center, scale, res):
160
+ coords = final_preds_untransformed(output, res)
161
+ preds = coords.clone()
162
+
163
+ # Transform back
164
+ for i in range(coords.size(0)):
165
+ preds[i] = transform_preds(coords[i], center[i], scale[i], res)
166
+
167
+ if preds.dim() < 3:
168
+ preds = preds.unsqueeze(0)
169
+
170
+ return preds
171
+
172
+
173
+ class AverageMeter(object):
174
+ """Computes and stores the average and current value"""
175
+ def __init__(self):
176
+ self.reset()
177
+
178
+ def reset(self):
179
+ self.val = 0
180
+ self.avg = 0
181
+ self.sum = 0
182
+ self.count = 0
183
+
184
+ def update(self, val, n=1):
185
+ self.val = val
186
+ self.sum += val * n
187
+ self.count += n
188
+ self.avg = self.sum / self.count
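A small sanity check (a sketch) for `get_preds_soft`: since it assumes a logit map, feeding `log(p)` of a rendered gaussian makes the spatial softmax recover `p` exactly, so the expectation lands on the gaussian mean (plus the 1-based offset):

```python
import torch
from kornia.geometry.subpix import dsnt
from stacked_hourglass.utils.evaluation import get_preds_soft

hm = dsnt.render_gaussian2d(mean=torch.tensor([[20., 30.]]),  # (x, y)
                            std=torch.tensor([[1.5, 1.5]]),
                            size=(64, 64),
                            normalized_coordinates=False)     # (1, 64, 64), sums to 1
logits = (hm.unsqueeze(0) + 1e-12).log()  # softmax(log p) == p, a valid logit map
preds = get_preds_soft(logits)            # 1-based pixel coordinates
print(preds)                              # approximately tensor([[[21., 31.]]])
```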
src/stacked_hourglass/utils/finetune.py ADDED
@@ -0,0 +1,39 @@
1
+ # Modified from:
2
+ # https://github.com/anibali/pytorch-stacked-hourglass
3
+ # https://github.com/bearpaw/pytorch-pose
4
+
5
+ import torch
6
+ from torch.nn import Conv2d, ModuleList
7
+
8
+
9
+ def change_hg_outputs(model, indices):
10
+ """Change the output classes of the model.
11
+
12
+ Args:
13
+ model: The model to modify.
14
+ indices: An array of indices describing the new model outputs. For example, [3, 4, None]
15
+ will modify the model to have 3 outputs, the first two of which have parameters
16
+ copied from the fourth and fifth outputs of the original model.
17
+ """
18
+ with torch.no_grad():
19
+ new_n_outputs = len(indices)
20
+ new_score = ModuleList()
21
+ for conv in model.score:
22
+ new_conv = Conv2d(conv.in_channels, new_n_outputs, conv.kernel_size, conv.stride)
23
+ new_conv = new_conv.to(conv.weight.device, conv.weight.dtype)
24
+ for i, index in enumerate(indices):
25
+ if index is not None:
26
+ new_conv.weight[i] = conv.weight[index]
27
+ new_conv.bias[i] = conv.bias[index]
28
+ new_score.append(new_conv)
29
+ model.score = new_score
30
+ new_score_ = ModuleList()
31
+ for conv in model.score_:
32
+ new_conv = Conv2d(new_n_outputs, conv.out_channels, conv.kernel_size, conv.stride)
33
+ new_conv = new_conv.to(conv.weight.device, conv.weight.dtype)
34
+ for i, index in enumerate(indices):
35
+ if index is not None:
36
+ new_conv.weight[:, i] = conv.weight[:, index]
37
+ new_conv.bias = conv.bias
38
+ new_score_.append(new_conv)
39
+ model.score_ = new_score_
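A usage sketch for `change_hg_outputs` (the index choice is illustrative): keep the original outputs 3 and 4 and append one freshly initialized output channel:

```python
import torch
from stacked_hourglass.model import hg1
from stacked_hourglass.utils.finetune import change_hg_outputs

model = hg1(pretrained=False, num_classes=16, upsample_seg=False,
            add_partseg=False, num_partseg=None)
change_hg_outputs(model, indices=[3, 4, None])  # None -> randomly initialized output
model.eval()
with torch.no_grad():
    out = model(torch.zeros(1, 3, 256, 256))
print(out[-1].shape)  # torch.Size([1, 3, 64, 64]) -- the head now has 3 outputs
```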
src/stacked_hourglass/utils/imfit.py ADDED
@@ -0,0 +1,144 @@
1
+ # Modified from:
2
+ # https://github.com/anibali/pytorch-stacked-hourglass
3
+ # https://github.com/bearpaw/pytorch-pose
4
+
5
+ import torch
6
+ from torch.nn.functional import interpolate
7
+
8
+
9
+ def _resize(tensor, size, mode='bilinear'):
10
+ """Resize the image.
11
+
12
+ Args:
13
+ tensor (torch.Tensor): The image tensor to be resized.
14
+ size (tuple of int): Size of the resized image (height, width).
15
+ mode (str): The pixel sampling interpolation mode to be used.
16
+
17
+ Returns:
18
+ Tensor: The resized image tensor.
19
+ """
20
+ assert len(size) == 2
21
+
22
+ # If the tensor is already the desired size, return it immediately.
23
+ if tensor.shape[-2] == size[0] and tensor.shape[-1] == size[1]:
24
+ return tensor
25
+
26
+ if not tensor.is_floating_point():
27
+ dtype = tensor.dtype
28
+ tensor = tensor.to(torch.float32)
29
+ tensor = _resize(tensor, size, mode)
30
+ return tensor.to(dtype)
31
+
32
+ out_shape = (*tensor.shape[:-2], *size)
33
+ if tensor.ndimension() < 3:
34
+ raise Exception('tensor must be at least 2D')
35
+ elif tensor.ndimension() == 3:
36
+ tensor = tensor.unsqueeze(0)
37
+ elif tensor.ndimension() > 4:
38
+ tensor = tensor.view(-1, *tensor.shape[-3:])
39
+ align_corners = None
40
+ if mode in {'linear', 'bilinear', 'trilinear'}:
41
+ align_corners = False
42
+ resized = interpolate(tensor, size=size, mode=mode, align_corners=align_corners)
43
+ return resized.view(*out_shape)
44
+
45
+
46
+ def _crop(tensor, t, l, h, w, padding_mode='constant', fill=0):
47
+ """Crop the image, padding out-of-bounds regions.
48
+
49
+ Args:
50
+ tensor (torch.Tensor): The image tensor to be cropped.
51
+ t (int): Top pixel coordinate.
52
+ l (int): Left pixel coordinate.
53
+ h (int): Height of the cropped image.
54
+ w (int): Width of the cropped image.
55
+ padding_mode (str): Padding mode (currently "constant" is the only valid option).
56
+ fill (float): Fill value to use with constant padding.
57
+
58
+ Returns:
59
+ Tensor: The cropped image tensor.
60
+ """
61
+ # If the _crop region is wholly within the image, simply narrow the tensor.
62
+ if t >= 0 and l >= 0 and t + h <= tensor.size(-2) and l + w <= tensor.size(-1):
63
+ return tensor[..., t:t+h, l:l+w]
64
+
65
+ if padding_mode == 'constant':
66
+ result = torch.full((*tensor.size()[:-2], h, w), fill,
67
+ device=tensor.device, dtype=tensor.dtype)
68
+ else:
69
+ raise Exception('_crop only supports "constant" padding currently.')
70
+
71
+ sx1 = l
72
+ sy1 = t
73
+ sx2 = l + w
74
+ sy2 = t + h
75
+ dx1 = 0
76
+ dy1 = 0
77
+
78
+ if sx1 < 0:
79
+ dx1 = -sx1
80
+ w += sx1
81
+ sx1 = 0
82
+
83
+ if sy1 < 0:
84
+ dy1 = -sy1
85
+ h += sy1
86
+ sy1 = 0
87
+
88
+ if sx2 >= tensor.size(-1):
89
+ w -= sx2 - tensor.size(-1)
90
+
91
+ if sy2 >= tensor.size(-2):
92
+ h -= sy2 - tensor.size(-2)
93
+
94
+ # Copy the in-bounds sub-area of the _crop region into the result tensor.
95
+ if h > 0 and w > 0:
96
+ src = tensor.narrow(-2, sy1, h).narrow(-1, sx1, w)
97
+ dst = result.narrow(-2, dy1, h).narrow(-1, dx1, w)
98
+ dst.copy_(src)
99
+
100
+ return result
101
+
102
+
103
+ def calculate_fit_contain_output_area(in_height, in_width, out_height, out_width):
104
+ ih, iw = in_height, in_width
105
+ k = min(out_width / iw, out_height / ih)
106
+ oh = round(k * ih)
107
+ ow = round(k * iw)
108
+ y_off = (out_height - oh) // 2
109
+ x_off = (out_width - ow) // 2
110
+ return y_off, x_off, oh, ow
111
+
112
+
113
+ def fit(tensor, size, fit_mode='cover', resize_mode='bilinear', *, fill=0):
114
+ """Fit the image within the given spatial dimensions.
115
+
116
+ Args:
117
+ tensor (torch.Tensor): The image tensor to be fit.
118
+ size (tuple of int): Size of the output (height, width).
119
+ fit_mode (str): 'fill', 'contain', or 'cover'. These behave in the same way as CSS's
120
+ `object-fit` property.
121
+ fill (float): padding value (only applicable in 'contain' mode).
122
+
123
+ Returns:
124
+ Tensor: The resized image tensor.
125
+ """
126
+ if fit_mode == 'fill':
127
+ return _resize(tensor, size, mode=resize_mode)
128
+ elif fit_mode == 'contain':
129
+ y_off, x_off, oh, ow = calculate_fit_contain_output_area(*tensor.shape[-2:], *size)
130
+ resized = _resize(tensor, (oh, ow), mode=resize_mode)
131
+ result = tensor.new_full((*tensor.size()[:-2], *size), fill)
132
+ result[..., y_off:y_off + oh, x_off:x_off + ow] = resized
133
+ return result
134
+ elif fit_mode == 'cover':
135
+ ih, iw = tensor.shape[-2:]
136
+ k = max(size[-1] / iw, size[-2] / ih)
137
+ oh = round(k * ih)
138
+ ow = round(k * iw)
139
+ resized = _resize(tensor, (oh, ow), mode=resize_mode)
140
+ y_trim = (oh - size[-2]) // 2
141
+ x_trim = (ow - size[-1]) // 2
142
+ result = _crop(resized, y_trim, x_trim, size[-2], size[-1])
143
+ return result
144
+ raise ValueError('Invalid fit_mode: ' + repr(fit_mode))
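A worked example for `calculate_fit_contain_output_area`: letterboxing a 480x640 image into 256x256 uses k = min(256/640, 256/480) = 0.4, so the content area is 192x256 and is vertically centred:

```python
from stacked_hourglass.utils.imfit import calculate_fit_contain_output_area

y_off, x_off, oh, ow = calculate_fit_contain_output_area(480, 640, 256, 256)
print(y_off, x_off, oh, ow)  # 32 0 192 256
```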
src/stacked_hourglass/utils/imutils.py ADDED
@@ -0,0 +1,125 @@
1
+ # Modified from:
2
+ # https://github.com/anibali/pytorch-stacked-hourglass
3
+ # https://github.com/bearpaw/pytorch-pose
4
+
5
+ import numpy as np
6
+
7
+ from .misc import to_numpy, to_torch
8
+ from .pilutil import imread, imresize
9
+ from kornia.geometry.subpix import dsnt
10
+ import torch
11
+
12
+ def im_to_numpy(img):
13
+ img = to_numpy(img)
14
+ img = np.transpose(img, (1, 2, 0)) # H*W*C
15
+ return img
16
+
17
+ def im_to_torch(img):
18
+ img = np.transpose(img, (2, 0, 1)) # C*H*W
19
+ img = to_torch(img).float()
20
+ if img.max() > 1:
21
+ img /= 255
22
+ return img
23
+
24
+ def load_image(img_path):
25
+ # H x W x C => C x H x W
26
+ return im_to_torch(imread(img_path, mode='RGB'))
27
+
28
+ # =============================================================================
29
+ # Helpful functions generating groundtruth labelmap
30
+ # =============================================================================
31
+
32
+ def gaussian(shape=(7, 7), sigma=1):
33
+ """
34
+ 2D gaussian mask - should give the same result as MATLAB's
35
+ fspecial('gaussian',[shape],[sigma])
36
+ """
37
+ m,n = [(ss-1.)/2. for ss in shape]
38
+ y,x = np.ogrid[-m:m+1,-n:n+1]
39
+ h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )
40
+ h[ h < np.finfo(h.dtype).eps*h.max() ] = 0
41
+ return to_torch(h).float()
42
+
43
+ def draw_labelmap_orig(img, pt, sigma, type='Gaussian'):
44
+ # Draw a 2D gaussian
45
+ # Adapted from https://github.com/anewell/pose-hg-train/blob/master/src/pypose/draw.py
46
+ # maximum value of the gaussian is 1
47
+ img = to_numpy(img)
48
+
49
+ # Check that any part of the gaussian is in-bounds
50
+ ul = [int(pt[0] - 3 * sigma), int(pt[1] - 3 * sigma)]
51
+ br = [int(pt[0] + 3 * sigma + 1), int(pt[1] + 3 * sigma + 1)]
52
+ if (ul[0] >= img.shape[1] or ul[1] >= img.shape[0] or
53
+ br[0] < 0 or br[1] < 0):
54
+ # If not, just return the image as is
55
+ return to_torch(img), 0
56
+
57
+ # Generate gaussian
58
+ size = 6 * sigma + 1
59
+ x = np.arange(0, size, 1, float)
60
+ y = x[:, np.newaxis]
61
+ x0 = y0 = size // 2
62
+ # The gaussian is not normalized, we want the center value to equal 1
63
+ if type == 'Gaussian':
64
+ g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
65
+ elif type == 'Cauchy':
66
+ g = sigma / (((x - x0) ** 2 + (y - y0) ** 2 + sigma ** 2) ** 1.5)
67
+
68
+ # Usable gaussian range
69
+ g_x = max(0, -ul[0]), min(br[0], img.shape[1]) - ul[0]
70
+ g_y = max(0, -ul[1]), min(br[1], img.shape[0]) - ul[1]
71
+ # Image range
72
+ img_x = max(0, ul[0]), min(br[0], img.shape[1])
73
+ img_y = max(0, ul[1]), min(br[1], img.shape[0])
74
+
75
+ img[img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
76
+
77
+ return to_torch(img), 1
78
+
79
+
80
+
81
+ def draw_labelmap(img, pt, sigma, type='Gaussian'):
82
+ # Draw a 2D gaussian
83
+ # real probability distribution: the sum of all values is 1
84
+ img = to_numpy(img)
85
+ if not type == 'Gaussian':
86
+ raise NotImplementedError
87
+
88
+ # Check that any part of the gaussian is in-bounds
89
+ ul = [int(pt[0] - 3 * sigma), int(pt[1] - 3 * sigma)]
90
+ br = [int(pt[0] + 3 * sigma + 1), int(pt[1] + 3 * sigma + 1)]
91
+ if (ul[0] >= img.shape[1] or ul[1] >= img.shape[0] or
92
+ br[0] < 0 or br[1] < 0):
93
+ # If not, just return the image as is
94
+ return to_torch(img), 0
95
+
96
+ # Generate gaussian
97
+ # img_new = dsnt.render_gaussian2d(mean=torch.tensor([[-1, 0]]).float(), std=torch.tensor([[sigma, sigma]]).float(), size=(img.shape[0], img.shape[1]), normalized_coordinates=False)
98
+ img_new = dsnt.render_gaussian2d(mean=torch.tensor([[pt[0], pt[1]]]).float(), \
99
+ std=torch.tensor([[sigma, sigma]]).float(), \
100
+ size=(img.shape[0], img.shape[1]), \
101
+ normalized_coordinates=False)
102
+ img_new = img_new[0, :, :] # this is a torch image
103
+ return img_new, 1
104
+
105
+
106
+ def draw_multiple_labelmaps(out_res, pts, sigma, type='Gaussian'):
107
+ # Draw a 2D gaussian
108
+ # real probability distribution: the sum of all values is 1
109
+ if not type == 'Gaussian':
110
+ raise NotImplementedError
111
+
112
+ # Generate gaussians
113
+ n_pts = pts.shape[0]
114
+ imgs_new = dsnt.render_gaussian2d(mean=pts[:, :2], \
115
+ std=torch.tensor([[sigma, sigma]]).float().repeat((n_pts, 1)), \
116
+ size=(out_res[0], out_res[1]), \
117
+ normalized_coordinates=False) # shape: (n_pts, out_res[0], out_res[1])
118
+
119
+ visibility_orig = imgs_new.sum(axis=2).sum(axis=1) # shape: (n_pts)
120
+ visibility = torch.zeros((n_pts, 1), dtype=torch.float32)
121
+ visibility[visibility_orig>=0.99999] = 1.0
122
+
123
+
124
+
125
+ return imgs_new, visibility.int()
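Finally, a small sketch of the `draw_labelmap` contract: it renders a unit-mass gaussian (a true probability map, unlike `draw_labelmap_orig`) and reports visibility 0 when the keypoint's support falls outside the map:

```python
import torch
from stacked_hourglass.utils.imutils import draw_labelmap

target = torch.zeros(64, 64)
hm, vis = draw_labelmap(target, pt=torch.tensor([20., 30.]), sigma=2.0)
print(vis, float(hm.sum()))  # 1 ~1.0

hm_off, vis_off = draw_labelmap(target, pt=torch.tensor([-50., -50.]), sigma=2.0)
print(vis_off)  # 0 -- gaussian entirely out of bounds, map returned unchanged
```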