atsantiago committed
Commit 5a4b7bc
Parent: edb8a3c

Functional Sample

Files changed (5):
  1. app.py +53 -25
  2. examples/00015_colors.png +0 -0
  3. examples/car.JPG +0 -0
  4. examples/dash.jpg +0 -0
  5. utils.py +2 -2
app.py CHANGED
@@ -6,6 +6,8 @@ import numpy as np
 
 from huggingface_hub import from_pretrained_keras
 
+from einops import repeat
+
 custom_objects = {'BilinearUpSampling2D': BilinearUpSampling2D, 'depth_loss_function': None}
 print('Loading model...')
 model = from_pretrained_keras("keras-io/monocular-depth-estimation", custom_objects=custom_objects, compile=False)
@@ -15,56 +17,82 @@ import importlib
 import utils
 importlib.reload(utils)
 
+def layer_over_image(raw_image, filter, custom_color = [0, 0, 0]):
+    # print(raw_image[:, :, 0])
+    out_image = raw_image
+    out_image[:,:,0] = raw_image[:, :, 0] * filter
+    out_image[:,:,1] = raw_image[:, :, 1] * filter
+    out_image[:,:,2] = raw_image[:, :, 2] * filter
+    return raw_image
+
 def infer(image, min_th, max_th):
-    print('_'*20)
+    # print('_'*20)
     inputs = utils.load_images([image])
     outputs = utils.predict(model, inputs)
 
     plasma = plt.get_cmap('plasma')
     rescaled = outputs[0][:, :, 0]
-    print("Min Max Bef", np.min(rescaled), np.max(rescaled))
+    # print("Min Max Bef", np.min(rescaled), np.max(rescaled))
     rescaled = rescaled - np.min(rescaled)
     rescaled = rescaled / np.max(rescaled)
 
-    image_out = plasma(rescaled)[:, :, :3]
+    im_heat = plasma(rescaled)[:, :, :3]
+
+    # print("Min Max Aft", np.min(rescaled), np.max(rescaled))
+
+    # print("Shape Scaled:",rescaled.shape)
+    filt_base = rescaled
+    filt_base = repeat(filt_base, "h w -> (h 2) (w 2)")
+    filt_arr_min = (filt_base > min_th/100)
+    filt_arr_max = (filt_base < max_th/100)
+
+    filt_arr = filt_arr_min * filt_base * filt_arr_max
+    im_heat_filt = plasma(filt_arr)[:, :, :3]
 
-    print("Min Max Aft", np.min(rescaled), np.max(rescaled))
+    if max_th < 100:
+        image_emph = layer_over_image(image, filt_arr_max)
+    else:
+        image_emph = image
+
+    if min_th > 0:
+        image_emph = layer_over_image(image, filt_arr_min)
+    else:
+        image_emph = image
 
-    print("Shape Scaled:",rescaled.shape)
-    filtered = rescaled
-    # filtered[filtered[:, :, 0] < min_th/100, 0] = 0
-    # filtered[filtered[:, :, 0] < min_th/100, 1] = 0
-    # filtered[filtered[:, :, 0] < min_th/100, 2] = 0
-    # filt_arr = filtered[((filtered[:,0] > min_th/100) & (filtered[:,0] < max_th/100))]
-    filt_arr = (filtered > min_th/100) * filtered * (filtered < max_th/100)
 
+    # print("filt arr min", filt_arr_min)
 
-    print("Shape Image:",image.shape)
-    print("Shape Image filt:",im_filt.shape)
-    print("Shape Image Heat:",image_out.shape)
-    im_filt = plasma(filt_arr)[:, :, :3]
-    return image_out, im_filt, image
+    # print("Shape Image:",image.shape)
+    # print("Shape Image filt:",im_heat_filt.shape)
+    # print("Shape Image Heat:",im_heat.shape)
+    return im_heat, image_emph
 
 # def detr(im):
 # return im
 
 gr_input = [
-    gr.inputs.Image(label="image", type="numpy", shape=(640, 480))
-    ,gr.inputs.Slider(minimum=0, maximum=100, step=5, default=0, label="Minimum Threshold")
-    ,gr.inputs.Slider(minimum=0, maximum=100, step=5, default=100, label="Maximum Threshold")
+    gr.inputs.Image(label="Image", type="numpy", shape=(640, 480))
+    ,gr.inputs.Slider(minimum=0, maximum=100, step=0.5, default=0, label="Minimum Threshold")
+    ,gr.inputs.Slider(minimum=0, maximum=100, step=0.5, default=100, label="Maximum Threshold")
 ]
 
 gr_output = [
-    gr.outputs.Image(type="pil",label="HeatMap Image"),
-    gr.outputs.Image(type="pil",label="Filtered Image"),
-    gr.outputs.Image(type="pil",label="Output Image")
+    gr.outputs.Image(type="pil",label="Depth HeatMap"),
+    # gr.outputs.Image(type="pil",label="Filtered Image"),
+    # gr.outputs.Image(type="pil",label="Before"),
+    gr.outputs.Image(type="pil",label="Important Areas")
 ]
 
 iface = gr.Interface(
     fn=infer,
-    title="Space Title Here",
-    description = "Description Here",
+    title="Monocular Depth Filter",
+    description = "Used Keras Depth Estimation Model for estimating the depth of areas in an image. Image is then filtered out to only show the selected areas",
     inputs = gr_input,
-    outputs = gr_output
+    outputs = gr_output,
+    examples=[
+        ["examples/00015_colors.png", 0, 90]
+        ,["examples/car.jpg", 0, 30]
+        ,["examples/dash.jpg", 10, 55]
+    ]
 )
 iface.launch()
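For readers following the new filtering path in infer(), here is a small standalone sketch of the two techniques this commit introduces: einops.repeat for a nearest-neighbour 2x upsample of the depth map, and boolean threshold masks multiplied into the RGB channels (the same idea as layer_over_image). The function name demo_depth_filter and the assumed shapes are illustrative assumptions, not part of the Space.

import numpy as np
from einops import repeat

def demo_depth_filter(image, depth, min_th, max_th):
    # Assumed shapes: image is (H, W, 3); depth is (H/2, W/2) with values in [0, 1].
    # Nearest-neighbour 2x upsample so the depth map matches the image resolution,
    # mirroring repeat(filt_base, "h w -> (h 2) (w 2)") in app.py.
    depth_full = repeat(depth, "h w -> (h 2) (w 2)")

    # Boolean masks for the two sliders (slider values run from 0 to 100).
    mask_min = depth_full > min_th / 100
    mask_max = depth_full < max_th / 100
    mask = mask_min & mask_max

    # Zero every colour channel outside the selected depth band, which is what
    # layer_over_image() does one channel at a time.
    out = image.copy()            # copy so the caller's array is not mutated
    return out * mask[..., None]  # broadcast the 2-D mask over the RGB channels

One difference worth noting: layer_over_image in the commit writes into raw_image itself (out_image is only an alias), so the image passed to infer() is modified in place; the copy above avoids that side effect.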
examples/00015_colors.png ADDED
examples/car.JPG ADDED
examples/dash.jpg ADDED
utils.py CHANGED
@@ -12,8 +12,8 @@ def predict(model, images, minDepth=10, maxDepth=1000, batch_size=2):
     # Compute predictions
     predictions = model.predict(images, batch_size=batch_size)
     # Put in expected range
-    print("Max Depth:", np.amax(predictions), maxDepth)
-    print("Min Depth:", np.amin(predictions), minDepth)
+    # print("Max Depth:", np.amax(predictions), maxDepth)
+    # print("Min Depth:", np.amin(predictions), minDepth)
     return np.clip(depth_norm(predictions, maxDepth=maxDepth), minDepth, maxDepth) / maxDepth
 
 
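As a quick sanity check on the unchanged return line above: whatever depth_norm() produces is clipped to [minDepth, maxDepth] and divided by maxDepth, so predict() always hands infer() values in [minDepth/maxDepth, 1]. A minimal numeric sketch; the normed values below are made up for illustration, and depth_norm itself is defined elsewhere in the repo.

import numpy as np

minDepth, maxDepth = 10, 1000                    # defaults from predict()
normed = np.array([1.0, 5.0, 250.0, 4000.0])     # stand-in for depth_norm(predictions, maxDepth=maxDepth)

scaled = np.clip(normed, minDepth, maxDepth) / maxDepth
print(scaled)   # [0.01 0.01 0.25 1.  ] -> always within [minDepth/maxDepth, 1]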