nick-leland committed on
Commit b6fa050 · 1 Parent(s): 8093ae7

Updated the Gradio app to include inverse gradient generation

Files changed (2)
  1. app.py +146 -65
  2. temp_app.py +0 -286
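For orientation before the diff itself: the updated `apply_vector_field_transform` now returns both a forward warp (sampling the image at `x + gx`, `y + gy`) and an approximate inverse (sampling at `x - gx`, `y - gy`), both through `scipy.ndimage.map_coordinates`. The snippet below is a minimal, self-contained sketch of that idea, not code from this commit: the toy 64×64 ramp image and the synthetic displacement field are made up for illustration, and the round trip through the negated field only shows that negation approximately undoes a small, smooth warp.

```python
# Sketch (not from the repo): forward vs. approximate-inverse remap with a toy field,
# mirroring the x + gx / x - gx sampling added to apply_vector_field_transform.
import numpy as np
from scipy import ndimage

rows, cols = 64, 64
image = np.tile(np.linspace(0, 255, cols), (rows, 1))  # hypothetical grayscale ramp

y, x = np.mgrid[0:rows, 0:cols].astype(float)
# Synthetic displacement field: a gentle bulge around the image center (illustrative only)
cy, cx = rows / 2, cols / 2
r = np.sqrt((x - cx) ** 2 + (y - cy) ** 2) + 1e-6
gx = 3.0 * (x - cx) / r * np.exp(-r / 20)
gy = 3.0 * (y - cy) / r * np.exp(-r / 20)

# Forward warp: sample the source image at the displaced coordinates
forward = ndimage.map_coordinates(
    image,
    [np.clip(y + gy, 0, rows - 1), np.clip(x + gx, 0, cols - 1)],
    order=1, mode='reflect')

# Approximate inverse: negate the displacement, as the new code does with x - gx, y - gy
round_trip = ndimage.map_coordinates(
    forward,
    [np.clip(y - gy, 0, rows - 1), np.clip(x - gx, 0, cols - 1)],
    order=1, mode='reflect')

# For a small, smooth field the round trip should land close to the original image
print("mean |round-trip error|:", np.mean(np.abs(round_trip - image)))
```

Note that the app applies the negated field to the original image to produce the "Inverse Gradient" output, whereas the sketch round-trips the forward warp purely to show the approximation roughly cancels.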
app.py CHANGED
@@ -1,100 +1,95 @@
import numpy as np
+ import traceback
import gradio as gr
from PIL import Image
- from scipy import ndimage
+ from scipy import ndimage, interpolate
import matplotlib.pyplot as plt
from bulk_bulge_generation import definitions, smooth
# from transformers import pipeline
import fastai
from fastcore.all import *
from fastai.vision.all import *
+ from ultralytics import YOLO

def apply_vector_field_transform(image, func, radius, center=(0.5, 0.5), strength=1, edge_smoothness=0.1, center_smoothness=0.20):
-     # 0.106 strength = .50
-     # 0.106 strength = 1
    rows, cols = image.shape[:2]
    max_dim = max(rows, cols)

-     #Normalize the positions
-     # Y Needs to be flipped
    center_y = int(center[1] * rows)
    center_x = int(center[0] * cols)
-
-     # Inverts the Y axis (Numpy is 0 index at top of image)
    center_y = abs(rows - center_y)

-     print()
-     print(rows, cols)
-     print("y =", center_y, "/", rows)
-     print("x =", center_x, "/", cols)
-     print()
-
-     pixel_radius = int(max_dim * radius)
+     print(f"Image shape: {rows}x{cols}")
+     print(f"Center: ({center_x}, {center_y})")
+     print(f"Radius: {radius}, Strength: {strength}")
+     print(f"Edge smoothness: {edge_smoothness}, Center smoothness: {center_smoothness}")

    y, x = np.ogrid[:rows, :cols]
    y = (y - center_y) / max_dim
    x = (x - center_x) / max_dim

-     # Calculate distance from center
    dist_from_center = np.sqrt(x**2 + y**2)

-     # Calculate function values
    z = func(x, y)
+     print(f"Function output - min: {np.min(z)}, max: {np.max(z)}")

-     # Calculate gradients
    gy, gx = np.gradient(z)
+     print(f"Initial gradient - gx min: {np.min(gx)}, max: {np.max(gx)}")
+     print(f"Initial gradient - gy min: {np.min(gy)}, max: {np.max(gy)}")

-     # Creating a sigmoid function to apply to masks
-     def sigmoid(x, center, steepness):
-         return 1 / (1 + np.exp(-steepness * (x - center)))
-
-     print(radius)
-     print(strength)
-     print(edge_smoothness)
-     print(center_smoothness)
+     # Avoid division by zero
+     edge_smoothness = np.maximum(edge_smoothness, 1e-6)
+     center_smoothness = np.maximum(center_smoothness, 1e-6)

-     # Masking
    edge_mask = np.clip((radius - dist_from_center) / (radius * edge_smoothness), 0, 1)
-
    center_mask = np.clip((dist_from_center - radius * center_smoothness) / (radius * center_smoothness), 0, 1)
-
    mask = edge_mask * center_mask

-     # Apply mask to gradients
    gx = gx * mask
    gy = gy * mask

-     # Normalize gradient vectors
    magnitude = np.sqrt(gx**2 + gy**2)
    magnitude[magnitude == 0] = 1 # Avoid division by zero
    gx = gx / magnitude
    gy = gy / magnitude

-     # Scale the effect (Play with the number 5)
-     scale_factor = strength * np.log(max_dim) / 100 # Adjust strength based on image size
+     scale_factor = strength * np.log(max_dim) / 100
    gx = gx * scale_factor * mask
    gy = gy * scale_factor * mask

-     # Create the mapping
+     print(f"Final gradient - gx min: {np.min(gx)}, max: {np.max(gx)}")
+     print(f"Final gradient - gy min: {np.min(gy)}, max: {np.max(gy)}")
+
+     # Forward transformation
    x_new = x + gx
    y_new = y + gy

-     # Convert back to pixel coordinates
    x_new = x_new * max_dim + center_x
    y_new = y_new * max_dim + center_y

-     # Ensure the new coordinates are within the image boundaries
    x_new = np.clip(x_new, 0, cols - 1)
    y_new = np.clip(y_new, 0, rows - 1)

-     # Apply the transformation to each channel
-     channels = [ndimage.map_coordinates(image[..., i], [y_new, x_new], order=1, mode='reflect')
-                 for i in range(image.shape[2])]
+     # Inverse transformation
+     x_inv = x - gx
+     y_inv = y - gy

-     transformed_image = np.dstack(channels).astype(image.dtype)
+     x_inv = x_inv * max_dim + center_x
+     y_inv = y_inv * max_dim + center_y

-     return transformed_image, (gx, gy)
-
+     x_inv = np.clip(x_inv, 0, cols - 1)
+     y_inv = np.clip(y_inv, 0, rows - 1)
+
+     # Apply transformations
+     channels_forward = [ndimage.map_coordinates(image[..., i], [y_new, x_new], order=1, mode='reflect')
+                         for i in range(image.shape[2])]
+     channels_inverse = [ndimage.map_coordinates(image[..., i], [y_inv, x_inv], order=1, mode='reflect')
+                         for i in range(image.shape[2])]
+
+     transformed_image = np.dstack(channels_forward).astype(image.dtype)
+     inverse_transformed_image = np.dstack(channels_inverse).astype(image.dtype)
+
+     return transformed_image, inverse_transformed_image, (gx, gy)

def create_gradient_vector_field(gx, gy, image_shape, step=20, reverse=False):
    """
@@ -142,13 +137,83 @@ def create_gradient_vector_field(gx, gy, image_shape, step=20, reverse=False):

    return vector_field

+ import numpy as np
+ from scipy import interpolate
+
+ # def invert_gradient_vector_field(gx, gy, image_shape):
+ #     """
+ #     Invert the gradient vector field using a more accurate method.
+ #
+ #     :param gx: X-component of the gradient
+ #     :param gy: Y-component of the gradient
+ #     :param image_shape: Shape of the original image (height, width)
+ #     :return: Inverted gx and gy
+ #     """
+ #     rows, cols = image_shape
+ #     y, x = np.mgrid[0:rows, 0:cols]
+ #
+ #     # Calculate the new positions after applying the gradient
+ #     new_x = x + gx
+ #     new_y = y + gy
+ #
+ #     # Create a mask for valid (non-NaN, non-infinite) values
+ #     mask = np.isfinite(new_x) & np.isfinite(new_y)
+ #
+ #     # Flatten and filter the arrays
+ #     x_flat = x[mask]
+ #     y_flat = y[mask]
+ #     new_x_flat = new_x[mask]
+ #     new_y_flat = new_y[mask]
+ #
+ #     # Create the inverse mapping
+ #     inv_x = interpolate.griddata((new_x_flat, new_y_flat), x_flat, (x, y), method='linear', fill_value=np.nan)
+ #     inv_y = interpolate.griddata((new_x_flat, new_y_flat), y_flat, (x, y), method='linear', fill_value=np.nan)
+ #
+ #     # Calculate the inverse gradient
+ #     inv_gx = inv_x - x
+ #     inv_gy = inv_y - y
+ #
+ #     # Fill NaN values with zeros
+ #     inv_gx = np.nan_to_num(inv_gx)
+ #     inv_gy = np.nan_to_num(inv_gy)
+ #
+ #     return -inv_gx, -inv_gy  # Note the negation here
+
+ def apply_gradient_transform(image, gx, gy):
+     """
+     Apply the gradient transformation to an image.
+
+     :param image: Input image as a numpy array
+     :param gx: X-component of the gradient
+     :param gy: Y-component of the gradient
+     :return: Transformed image
+     """
+     rows, cols = image.shape[:2]
+     y, x = np.mgrid[0:rows, 0:cols]
+
+     # Apply the transformation
+     x_new = x + gx
+     y_new = y + gy
+
+     # Ensure the new coordinates are within the image boundaries
+     x_new = np.clip(x_new, 0, cols - 1)
+     y_new = np.clip(y_new, 0, rows - 1)
+
+     # Apply the transformation to each channel
+     channels = []
+     for i in range(image.shape[2]):
+         channel = image[:,:,i]
+         transformed_channel = interpolate.griddata((y.flatten(), x.flatten()), channel.flatten(), (y_new, x_new), method='linear', fill_value=0)
+         channels.append(transformed_channel)
+
+     transformed_image = np.dstack(channels).astype(image.dtype)
+
+     return transformed_image

#############################
# MAIN FUNCTION HERE
#############################

- # pipeline = pipeline(task="image-classification", model="nick-leland/distortionml")
-
# Version Check
print(f"NumPy version: {np.__version__}")
print(f"PyTorch version: {torch.__version__}")
@@ -157,6 +222,8 @@ print(f"FastAI version: {fastai.__version__}")
learn_bias = load_learner('model_bias.pkl')
learn_fresh = load_learner('model_fresh.pkl')

+ # Loads the YOLO Model
+ model = YOLO("bulge_yolo_model.pt")

def transform_image(image, func_choice, randomization_check, radius, center_x, center_y, strength, reverse_gradient=True, spiral_frequency=1):
    I = np.asarray(Image.open(image))
@@ -177,16 +244,11 @@ def transform_image(image, func_choice, randomization_check, radius, center_x, center_y, strength, reverse_gradient=True, spiral_frequency=1):
        return r * np.sin(theta - frequency * r)

    rng = np.random.default_rng()
-     if randomization_check == True:
-         radius, location, strength, edge_smoothness= definitions(rng)
-         center_x = location[0]
-         center_y = location[1]
-
-         # Temporarily disabling and using these values.
-         # edge_smoothness = 0.25 * strength
-         # center_smoothness = 0.25 * strength
-         edge_smoothness, center_smoothness = smooth(rng, strength)
-
+     if randomization_check:
+         radius, location, strength, edge_smoothness = definitions(rng)
+         center_x, center_y = location
+     else:
+         edge_smoothness, center_smoothness = smooth(rng, strength)

    if func_choice == "Pinch":
        func = pinch
@@ -202,29 +264,47 @@ def transform_image(image, func_choice, randomization_check, radius, center_x, center_y, strength, reverse_gradient=True, spiral_frequency=1):
        func = lambda x, y: spiral(x, y, frequency=spiral_frequency)


-     transformed, (gx, gy) = apply_vector_field_transform(I, func, radius, (center_x, center_y), strength, edge_smoothness, center_smoothness)
-     vector_field = create_gradient_vector_field(gx, gy, I.shape[:2], reverse=reverse_gradient)
+     print(f"Function choice: {func_choice}")
+     print(f"Input image shape: {I.shape}")
+
+     try:
+         transformed, inverse_transformed, (gx, gy) = apply_vector_field_transform(
+             I, func, radius, (center_x, center_y), strength, edge_smoothness, center_smoothness
+         )
+         print(f"Transformed image shape: {transformed.shape}")
+         print(f"Inverse transformed image shape: {inverse_transformed.shape}")
+         print(f"Gradient shapes: gx {gx.shape}, gy {gy.shape}")
+         print(f"Gradient ranges: gx [{np.min(gx)}, {np.max(gx)}], gy [{np.min(gy)}, {np.max(gy)}]")
+
+         vector_field = create_gradient_vector_field(gx, gy, I.shape[:2], reverse=reverse_gradient)
+         inverted_vector_field = create_gradient_vector_field(-gx, -gy, I.shape[:2], reverse=False)
+
+         print(f"Vector field shape: {vector_field.shape}")
+         print(f"Inverted vector field shape: {inverted_vector_field.shape}")
+     except Exception as e:
+         print(f"Error in transformation: {str(e)}")
+         traceback.print_exc()
+         transformed = np.zeros_like(I)
+         inverse_transformed = np.zeros_like(I)
+         vector_field = np.zeros_like(I)
+         inverted_vector_field = np.zeros_like(I)

-     # GRADIO CHANGE HERE
-     # predictions = pipeline(transformed)
-
-     # Have to convert to image first
    result = Image.fromarray(transformed)

    categories = ['Distorted', 'Maze']

    def clean_output(result_values):
-         pred, idx, probs = result_values[0], result_values[1], result_values[2]
+         pred, idx, probs = result_values
        return dict(zip(categories, map(float, probs)))

    result_bias = learn_bias.predict(result)
    result_fresh = learn_fresh.predict(result)
-     print("Results")
    result_bias_final = clean_output(result_bias)
    result_fresh_final = clean_output(result_fresh)

-     # return transformed, result_bias, result_fresh, vector_field
-     return transformed, result_bias_final, result_fresh_final, vector_field
+     result_localization = model.predict(transformed, save=True)
+
+     return transformed, result_bias_final, result_fresh_final, vector_field, inverse_transformed, inverted_vector_field

demo = gr.Interface(
    fn=transform_image,
@@ -248,10 +328,11 @@ demo = gr.Interface(
    ],
    outputs=[
        gr.Image(label="Transformed Image"),
-         # gr.Image(label="Result", num_top_classes=2)
        gr.Label(),
        gr.Label(),
-         gr.Image(label="Gradient Vector Field")
+         gr.Image(label="Gradient Vector Field"),
+         gr.Image(label="Inverse Gradient"),
+         gr.Image(label="Inverted Vector Field"),
    ],
    title="Image Transformation Demo!",
    article="If you like this demo, please star the github repository for the project! Located [here!](https://github.com/nick-leland/DistortionML)",
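The new file also keeps a commented-out `invert_gradient_vector_field` that builds a true inverse mapping by scattering the forward-mapped coordinates and re-gridding them with `scipy.interpolate.griddata`, rather than simply negating the field. The snippet below is a small sketch of that scatter-and-regrid idea, not code from the commit: the grid size, the displacement field, and the variable names are all toy values chosen for illustration.

```python
# Sketch (not from the repo): invert a toy displacement field by re-gridding the
# forward-mapped positions, the approach used by invert_gradient_vector_field above.
import numpy as np
from scipy import interpolate

rows, cols = 32, 32
y, x = np.mgrid[0:rows, 0:cols].astype(float)

# Toy displacement field (hypothetical): a small shift that grows toward the right edge
gx = 0.05 * x
gy = np.zeros_like(x)

# Forward-mapped position of every pixel
new_x = x + gx
new_y = y + gy

# Interpolate the source coordinates back onto the original grid to get the inverse map
inv_x = interpolate.griddata((new_x.ravel(), new_y.ravel()), x.ravel(), (x, y), method='linear')
inv_y = interpolate.griddata((new_x.ravel(), new_y.ravel()), y.ravel(), (x, y), method='linear')

# Recovered inverse displacement; NaNs appear wherever the forward map leaves gaps
inv_gx = np.nan_to_num(inv_x - x)
inv_gy = np.nan_to_num(inv_y - y)

# Inside the covered region, composing forward and inverse displacements nearly cancels
valid = np.isfinite(inv_x)
print("max |gx| :", np.max(np.abs(gx)))
print("max |gx + inv_gx| on covered pixels:", np.max(np.abs(gx + inv_gx)[valid]))
```

Compared with the `x - gx` shortcut used inside `apply_vector_field_transform`, this re-gridding approach is slower (it solves a scattered interpolation problem) but stays accurate even when the displacement is not small.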
temp_app.py DELETED
@@ -1,286 +0,0 @@
- import numpy as np
- import gradio as gr
- from PIL import Image
- from scipy import ndimage
- import matplotlib.pyplot as plt
- from bulk_bulge_generation import definitions, smooth
- # from transformers import pipeline
- import fastai
- from fastcore.all import *
- from fastai.vision.all import *
- from ultralytics import YOLO
-
- def apply_vector_field_transform(image, func, radius, center=(0.5, 0.5), strength=1, edge_smoothness=0.1, center_smoothness=0.20):
-     # 0.106 strength = .50
-     # 0.106 strength = 1
-     rows, cols = image.shape[:2]
-     max_dim = max(rows, cols)
-
-     #Normalize the positions
-     # Y Needs to be flipped
-     center_y = int(center[1] * rows)
-     center_x = int(center[0] * cols)
-
-     # Inverts the Y axis (Numpy is 0 index at top of image)
-     center_y = abs(rows - center_y)
-
-     print()
-     print(rows, cols)
-     print("y =", center_y, "/", rows)
-     print("x =", center_x, "/", cols)
-     print()
-
-     pixel_radius = int(max_dim * radius)
-
-     y, x = np.ogrid[:rows, :cols]
-     y = (y - center_y) / max_dim
-     x = (x - center_x) / max_dim
-
-     # Calculate distance from center
-     dist_from_center = np.sqrt(x**2 + y**2)
-
-     # Calculate function values
-     z = func(x, y)
-
-     # Calculate gradients
-     gy, gx = np.gradient(z)
-
-     # Creating a sigmoid function to apply to masks
-     def sigmoid(x, center, steepness):
-         return 1 / (1 + np.exp(-steepness * (x - center)))
-
-     print(radius)
-     print(strength)
-     print(edge_smoothness)
-     print(center_smoothness)
-
-     # Masking
-     edge_mask = np.clip((radius - dist_from_center) / (radius * edge_smoothness), 0, 1)
-
-     center_mask = np.clip((dist_from_center - radius * center_smoothness) / (radius * center_smoothness), 0, 1)
-
-     mask = edge_mask * center_mask
-
-     # Apply mask to gradients
-     gx = gx * mask
-     gy = gy * mask
-
-     # Normalize gradient vectors
-     magnitude = np.sqrt(gx**2 + gy**2)
-     magnitude[magnitude == 0] = 1 # Avoid division by zero
-     gx = gx / magnitude
-     gy = gy / magnitude
-
-     # Scale the effect (Play with the number 5)
-     scale_factor = strength * np.log(max_dim) / 100 # Adjust strength based on image size
-     gx = gx * scale_factor * mask
-     gy = gy * scale_factor * mask
-
-     # Create the mapping
-     x_new = x + gx
-     y_new = y + gy
-
-     # Convert back to pixel coordinates
-     x_new = x_new * max_dim + center_x
-     y_new = y_new * max_dim + center_y
-
-     # Ensure the new coordinates are within the image boundaries
-     x_new = np.clip(x_new, 0, cols - 1)
-     y_new = np.clip(y_new, 0, rows - 1)
-
-     # Apply the transformation to each channel
-     channels = [ndimage.map_coordinates(image[..., i], [y_new, x_new], order=1, mode='reflect')
-                 for i in range(image.shape[2])]
-
-     transformed_image = np.dstack(channels).astype(image.dtype)
-
-     return transformed_image, (gx, gy)
-
-
- def create_gradient_vector_field(gx, gy, image_shape, step=20, reverse=False):
-     """
-     Create a gradient vector field visualization with option to reverse direction.
-
-     :param gx: X-component of the gradient
-     :param gy: Y-component of the gradient
-     :param image_shape: Shape of the original image (height, width)
-     :param step: Spacing between arrows
-     :param reverse: If True, reverse the direction of the arrows
-     :return: Gradient vector field as a numpy array (RGB image)
-     """
-     rows, cols = image_shape
-     y, x = np.mgrid[step/2:rows:step, step/2:cols:step].reshape(2, -1).astype(int)
-
-     # Calculate the scale based on image size
-     max_dim = max(rows, cols)
-     scale = max_dim / 1000 # Adjusted for longer arrows
-
-     # Reverse direction if specified
-     direction = -1 if reverse else 1
-
-     fig, ax = plt.subplots(figsize=(cols/50, rows/50), dpi=100)
-     ax.quiver(x, y, direction * gx[y, x], direction * -gy[y, x],
-               scale=scale,
-               scale_units='width',
-               width=0.002 * max_dim / 500,
-               headwidth=8,
-               headlength=12,
-               headaxislength=0,
-               color='black',
-               minshaft=2,
-               minlength=0,
-               pivot='tail')
-     ax.set_xlim(0, cols)
-     ax.set_ylim(rows, 0)
-     ax.set_aspect('equal')
-     ax.axis('off')
-
-     fig.tight_layout(pad=0)
-     fig.canvas.draw()
-     vector_field = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
-     vector_field = vector_field.reshape(fig.canvas.get_width_height()[::-1] + (3,))
-     plt.close(fig)
-
-     return vector_field
-
-
- #############################
- # MAIN FUNCTION HERE
- #############################
-
- # pipeline = pipeline(task="image-classification", model="nick-leland/distortionml")
-
- # Version Check
- print(f"NumPy version: {np.__version__}")
- print(f"PyTorch version: {torch.__version__}")
- print(f"FastAI version: {fastai.__version__}")
-
- learn_bias = load_learner('model_bias.pkl')
- learn_fresh = load_learner('model_fresh.pkl')
-
- # Loads the YOLO Model
- model = YOLO("bulge_yolo_model.pt")
-
-
- def transform_image(image, func_choice, randomization_check, radius, center_x, center_y, strength, reverse_gradient=True, spiral_frequency=1):
-     I = np.asarray(Image.open(image))
-
-     def pinch(x, y):
-         return x**2 + y**2
-
-     def shift(x, y):
-         return np.arctan2(y, x)
-
-     def bulge(x, y):
-         r = -np.sqrt(x**2 + y**2)
-         return r
-
-     def bulge_inverse(x, y, f=bulge, a=1, b=1, c=1, d=0, e=0):
-         t = np.arctan2(y, x)
-         term = ((f - e) / (-a))**2 - d
-         if term < 0:
-             return None, None
-
-         x = (1/np.sqrt(b)) * np.sqrt(term) * np.cos(t)
-         y = (1/np.sqrt(c)) * np.sqrt(term) * np.sin(t)
-
-         return x, y
-
-     def spiral(x, y, frequency=1):
-         r = np.sqrt(x**2 + y**2)
-         theta = np.arctan2(y, x)
-         return r * np.sin(theta - frequency * r)
-
-     rng = np.random.default_rng()
-     if randomization_check == True:
-         radius, location, strength, edge_smoothness= definitions(rng)
-         center_x = location[0]
-         center_y = location[1]
-
-         # Temporarily disabling and using these values.
-         # edge_smoothness = 0.25 * strength
-         # center_smoothness = 0.25 * strength
-         edge_smoothness, center_smoothness = smooth(rng, strength)
-
-
-     if func_choice == "Pinch":
-         func = pinch
-     elif func_choice == "Spiral":
-         func = shift
-     elif func_choice == "Bulge":
-         func = bulge
-         func2 = bulge_inverse
-         edge_smoothness = 0
-         center_smoothness = 0
-     elif func_choice == "Volcano":
-         func = bulge
-     elif func_choice == "Shift Up":
-         func = lambda x, y: spiral(x, y, frequency=spiral_frequency)
-
-
-     # Original Image Transformation
-     transformed, (gx, gy) = apply_vector_field_transform(I, func, radius, (center_x, center_y), strength, edge_smoothness, center_smoothness)
-     vector_field = create_gradient_vector_field(gx, gy, I.shape[:2], reverse=reverse_gradient)
-
-     reverted, (gx_inverse, gy_inverse) = apply_vector_field_transform(I, func2, radius, (center_x, center_y), strength, edge_smoothness, center_smoothness)
-     vector_field_reverted = create_gradient_vector_field(gx_inverse, gy_inverse, I.shape[:2], reverse=reverse_gradient)
-
-
-     # GRADIO CHANGE HERE
-     # predictions = pipeline(transformed)
-
-     # Have to convert to image first
-     result = Image.fromarray(transformed)
-
-     categories = ['Distorted', 'Maze']
-
-     def clean_output(result_values):
-         pred, idx, probs = result_values[0], result_values[1], result_values[2]
-         return dict(zip(categories, map(float, probs)))
-
-     result_bias = learn_bias.predict(result)
-     result_fresh = learn_fresh.predict(result)
-     print("Results")
-     result_bias_final = clean_output(result_bias)
-     result_fresh_final = clean_output(result_fresh)
-
-     print("saving?")
-     result_localization = model.predict(transformed, save=True)
-     print(result_localization)
-
-     return transformed, result_bias_final, result_fresh_final, vector_field, vector_field_reverted
-
- demo = gr.Interface(
-     fn=transform_image,
-     inputs=[
-         gr.Image(type="filepath"),
-         gr.Dropdown(["Pinch", "Spiral", "Shift Up", "Bulge", "Volcano"], value="Volcano", label="Function"),
-         gr.Checkbox(label="Randomize inputs?"),
-         gr.Slider(0, 0.5, value=0.25, label="Radius (as fraction of image size)"),
-         gr.Slider(0, 1, value=0.5, label="Center X"),
-         gr.Slider(0, 1, value=0.5, label="Center Y"),
-         gr.Slider(0, 1, value=0.5, label="Strength"),
-         # gr.Slider(0, 1, value=0.5, label="Edge Smoothness"),
-         # gr.Slider(0, 0.5, value=0.1, label="Center Smoothness")
-         # gr.Checkbox(label="Reverse Gradient Direction"),
-     ],
-     examples=[
-         [np.asarray(Image.open("examples/1500_maze.jpg")), "Bulge", True, 0.25, 0.5, 0.5, 0.5],
-         [np.asarray(Image.open("examples/2048_maze.jpg")), "Bulge", True, 0.25, 0.5, 0.5, 0.5],
-         [np.asarray(Image.open("examples/2300_fresh.jpg")), "Bulge", True, 0.25, 0.5, 0.5, 0.5],
-         [np.asarray(Image.open("examples/50_fresh.jpg")), "Bulge", True, 0.25, 0.5, 0.5, 0.5]
-     ],
-     outputs=[
-         gr.Image(label="Transformed Image"),
-         # gr.Image(label="Result", num_top_classes=2)
-         gr.Label(),
-         gr.Label(),
-         gr.Image(label="Gradient Vector Field"),
-         gr.Image(label="Gradient Vector Field Reverted")
-     ],
-     title="Image Transformation Demo!",
-     article="If you like this demo, please star the github repository for the project! Located [here!](https://github.com/nick-leland/DistortionML)",
-     description="This is the baseline function that will be used to generate the database for a machine learning model I am working on called 'DistortionMl'! The goal of this model is to detect and then reverse image transformations that can be generated here!\nYou can read more about the project at [this repository link](https://github.com/nick-leland/DistortionML). The main function that I was working on is the 'Bulge'/'Volcano' function, I can't really guarantee that the others work as well!\nI have just added the first baseline ML model to detect if a distortion has taken place! It was only trained on mazes though ([Dataset Here](https://www.kaggle.com/datasets/nickleland/distorted-mazes)) so in order for it to detect a distortion you have to use one of the images provided in the examples! Feel free to mess around wtih other images in the meantime though!"
- )
-
- demo.launch(share=True)