daidedou committed on
Commit 3ccb54b · 1 Parent(s): 3c50fb3

Remove pykeops for the ZeroGPU Space demo.

Files changed (3)
  1. app.py +3 -3
  2. utils/mesh.py +1 -1
  3. utils/torch_fmap.py +8 -9
app.py CHANGED
@@ -87,14 +87,14 @@ DEFAULT_SETTINGS = {
 FLOAT_SLIDERS = {
     # name: (min, max, step)
     "deepfeat_conf.fmap.lambda_": (1e-3, 10.0, 1e-3),
-    "sds_conf.zoomout": (1e-3, 10.0, 1e-3),
-    "diffusion.time": (1e-3, 10.0, 1e-3),
+    "diffusion.time": (0.1, 10.0, 0.1),
     "loss.sds": (1e-3, 10.0, 1e-3),
     "loss.proper": (1e-3, 10.0, 1e-3),
 }
 
 INT_SLIDERS = {
     "opt.n_loop": (1, 5000, 1),
+    "sds_conf.zoomout": (31, 50, 1),
 }
 
 
@@ -223,7 +223,7 @@ with gr.Blocks(title="DiffuMatch demo") as demo:
     with gr.Accordion("Settings", open=True):
         with gr.Row():
             lambda_val = gr.Slider(minimum=FLOAT_SLIDERS["deepfeat_conf.fmap.lambda_"][0], maximum=FLOAT_SLIDERS["deepfeat_conf.fmap.lambda_"][1], step=FLOAT_SLIDERS["deepfeat_conf.fmap.lambda_"][2], value=1, label="deepfeat_conf.fmap.lambda_")
-            zoomout_val = gr.Slider(minimum=FLOAT_SLIDERS["sds_conf.zoomout"][0], maximum=FLOAT_SLIDERS["sds_conf.zoomout"][1], step=FLOAT_SLIDERS["sds_conf.zoomout"][2], value=40, label="sds_conf.zoomout")
+            zoomout_val = gr.Slider(minimum=INT_SLIDERS["sds_conf.zoomout"][0], maximum=INT_SLIDERS["sds_conf.zoomout"][1], step=INT_SLIDERS["sds_conf.zoomout"][2], value=40, label="sds_conf.zoomout")
             time_val = gr.Slider(minimum=FLOAT_SLIDERS["diffusion.time"][0], maximum=FLOAT_SLIDERS["diffusion.time"][1], step=FLOAT_SLIDERS["diffusion.time"][2], value=1, label="diffusion.time")
         with gr.Row():
             nloop_val = gr.Slider(minimum=INT_SLIDERS["opt.n_loop"][0], maximum=INT_SLIDERS["opt.n_loop"][1], step=INT_SLIDERS["opt.n_loop"][2], value=300, label="opt.n_loop")
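After this change, "sds_conf.zoomout" is driven by an integer slider (31 to 50) instead of a float one, and "diffusion.time" keeps a float slider with a coarser 0.1 step. The sketch below only restates the slider configuration shown in the hunks above, wrapped in a throwaway gr.Blocks so it can be run on its own; it is not the full app.py.

# Sketch only: slider configuration after this commit, mirroring the hunks above.
import gradio as gr

FLOAT_SLIDERS = {
    # name: (min, max, step)
    "deepfeat_conf.fmap.lambda_": (1e-3, 10.0, 1e-3),
    "diffusion.time": (0.1, 10.0, 0.1),
    "loss.sds": (1e-3, 10.0, 1e-3),
    "loss.proper": (1e-3, 10.0, 1e-3),
}

INT_SLIDERS = {
    "opt.n_loop": (1, 5000, 1),
    "sds_conf.zoomout": (31, 50, 1),
}

with gr.Blocks() as demo:
    # Same lookup pattern as app.py: the (min, max, step) triple is read by parameter name.
    lo, hi, step = INT_SLIDERS["sds_conf.zoomout"]
    zoomout_val = gr.Slider(minimum=lo, maximum=hi, step=step, value=40, label="sds_conf.zoomout")
    lo, hi, step = FLOAT_SLIDERS["diffusion.time"]
    time_val = gr.Slider(minimum=lo, maximum=hi, step=step, value=1, label="diffusion.time")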
utils/mesh.py CHANGED
@@ -31,7 +31,7 @@ def list_files(folder_path, name_filter, alphanum_sort=False):
     else:
         return sorted(file_list)
 
-def find_mesh_files(directory: Path, extensions: set=MESH_EXTENSIONS, alphanum_sort=False) -> list[Path]:
+def find_mesh_files(directory, extensions, alphanum_sort=False): # Path, extensions: set=MESH_EXTENSIONS, alphanum_sort=False) -> list[Path]:
     """
     Recursively find all files in 'directory' whose suffix (lowercased) is in 'extensions'.
     Returns a list of Path objects.
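The hunk only strips the type annotations and default arguments from the signature; the docstring it keeps describes a recursive, extension-filtered search. For reference, a self-contained sketch of such a helper, written from that docstring alone, is below. The MESH_EXTENSIONS values and the plain lexicographic sort are assumptions for illustration; the repository's actual body, including its alphanum_sort handling, is not reproduced here.

# Illustrative stand-in for find_mesh_files, based only on the docstring above.
from pathlib import Path

MESH_EXTENSIONS = {".obj", ".off", ".ply"}  # assumed set of mesh suffixes

def find_mesh_files(directory, extensions=MESH_EXTENSIONS, alphanum_sort=False):
    """Recursively list files under 'directory' whose lowercased suffix is in 'extensions'."""
    directory = Path(directory)
    files = [p for p in directory.rglob("*") if p.is_file() and p.suffix.lower() in extensions]
    # Plain lexicographic sort; the demo's alphanum_sort behaviour is not reproduced here.
    return sorted(files)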
utils/torch_fmap.py CHANGED
@@ -1,6 +1,5 @@
 import torch
 import torch.nn.functional as F
-from pykeops.torch import LazyTensor
 
 
 def euclidean_dist(x, y):
@@ -33,14 +32,14 @@ def extract_p2p_torch(reps_shape, reps_template):
         # print((evecs0_dzo @ fmap01_final.squeeze().T).shape)
         # print(evecs1_dzo.shape)
         reps_shape_torch = torch.from_numpy(reps_shape).float().cuda()
-        G_i = LazyTensor(reps_shape_torch[:, None, :].contiguous()) # (M**2, 1, 2)
+        G_i = (reps_shape_torch[:, None, :].contiguous()) # (M**2, 1, 2)
         reps_template_torch = torch.from_numpy(reps_template).float().cuda()
-        X_j = LazyTensor(reps_template_torch[None, :, :n_ev].contiguous()) # (1, N, 2)
+        X_j = (reps_template_torch[None, :, :n_ev].contiguous()) # (1, N, 2)
         D_ij = ((G_i - X_j) ** 2).sum(-1) # (M**2, N) symbolic matrix of squared distances
-        indKNN = D_ij.argKmin(1, dim=0).squeeze() # Grid <-> Samples, (M**2, K) integer tensor
+        indKNN = torch.argmin(D_ij, dim=0).squeeze() # Grid <-> Samples, (M**2, K) integer tensor
         # pmap10_ref = FM_to_p2p(fmap01_final.detach().squeeze().cpu().numpy(), s_dict['evecs'], template_dict['evecs'])
         # print(indKNN[:10], pmap10_ref[:10])
-        indKNN_2 = D_ij.argKmin(1, dim=1).squeeze()
+        indKNN_2 = torch.argmin(D_ij, dim=1).squeeze()
     return indKNN.detach().cpu().numpy(), indKNN_2.detach().cpu().numpy()
 
 def extract_p2p_torch_fmap(fmap_shape_template, evecs_shape, evecs_template):
@@ -48,13 +47,13 @@ def extract_p2p_torch_fmap(fmap_shape_template, evecs_shape, evecs_template):
     with torch.no_grad():
         # print((evecs0_dzo @ fmap01_final.squeeze().T).shape)
         # print(evecs1_dzo.shape)
-        G_i = LazyTensor((evecs_shape[:, :n_ev] @ fmap_shape_template.squeeze().T)[:, None, :].contiguous()) # (M**2, 1, 2)
-        X_j = LazyTensor(evecs_template[None, :, :n_ev].contiguous()) # (1, N, 2)
+        G_i = ((evecs_shape[:, :n_ev] @ fmap_shape_template.squeeze().T)[:, None, :].contiguous()) # (M**2, 1, 2)
+        X_j = (evecs_template[None, :, :n_ev].contiguous()) # (1, N, 2)
         D_ij = ((G_i - X_j) ** 2).sum(-1) # (M**2, N) symbolic matrix of squared distances
-        indKNN = D_ij.argKmin(1, dim=0).squeeze() # Grid <-> Samples, (M**2, K) integer tensor
+        indKNN = torch.argmin(D_ij, dim=0).squeeze() # Grid <-> Samples, (M**2, K) integer tensor
         # pmap10_ref = FM_to_p2p(fmap01_final.detach().squeeze().cpu().numpy(), s_dict['evecs'], template_dict['evecs'])
         # print(indKNN[:10], pmap10_ref[:10])
-        indKNN_2 = D_ij.argKmin(1, dim=1).squeeze()
+        indKNN_2 = torch.argmin(D_ij, dim=1).squeeze()
     return indKNN.detach().cpu().numpy(), indKNN_2.detach().cpu().numpy()
 
 def wlstsq(A, B, w):
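These hunks replace pykeops' symbolic reduction with a plain dense PyTorch one: with LazyTensor, ((G_i - X_j) ** 2).sum(-1) stays a lazy (M, N) object that argKmin reduces without ever materializing it, whereas after this commit the same broadcasting builds the full (M, N) distance matrix in memory and torch.argmin reduces it. That avoids pykeops' compilation step on the ZeroGPU Space at the cost of memory that grows with M * N. A stand-alone sketch of the pattern the new code uses is below; tensor names and sizes are illustrative and not taken from the repository.

# Sketch of the pykeops-free nearest-neighbour lookup used by the functions above.
# Broadcasting materializes the dense (M, N) distance matrix, unlike pykeops' lazy reduction.
import torch

def nearest_neighbours(src, dst):
    """Mutual nearest-index lookup between two point sets under squared L2 distance."""
    diff = src[:, None, :] - dst[None, :, :]   # (M, N, d), fully materialized
    d2 = (diff ** 2).sum(-1)                   # (M, N) squared distances
    src_to_dst = torch.argmin(d2, dim=1)       # for each src row, index of the closest dst row
    dst_to_src = torch.argmin(d2, dim=0)       # for each dst row, index of the closest src row
    return src_to_dst, dst_to_src

# Toy usage: spectral embeddings of a shape (M vertices) and of a template (N vertices).
src = torch.randn(500, 30)
dst = torch.randn(400, 30)
fwd, bwd = nearest_neighbours(src, dst)

If the meshes are large enough that the dense (M, N) matrix no longer fits in GPU memory, chunking the rows of src (or computing blockwise distances with torch.cdist) gives the same indices without reintroducing pykeops.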