xiaoyao9184 committed (verified)
Commit 4d66995 · Parent(s): 1b16e50

Synced repo using 'sync_with_huggingface' Github Action

Files changed (3):
1. gradio_app.py (+447, -402)
2. multiwm.py (+10, -0)
3. requirements.txt (+2, -1)
gradio_app.py CHANGED

```diff
@@ -5,418 +5,463 @@ if "APP_PATH" in os.environ:
     # fix sys.path for import
     sys.path.append(os.getcwd())
 
-import gradio as gr
-
-import re
-import string
-import random
-import os
-import numpy as np
-from PIL import Image
-import torch
-import torch.nn.functional as F
-from torchvision import transforms
-
-
-from watermark_anything.data.metrics import msg_predict_inference
-from notebooks.inference_utils import (
-    load_model_from_checkpoint,
-    default_transform,
-    unnormalize_img,
-    create_random_mask,
-    plot_outputs,
-    msg2str,
-    torch_to_np,
-    multiwm_dbscan
-)
-
-# Device configuration
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-# Constants
-proportion_masked = 0.5  # Proportion of image to be watermarked
-epsilon = 1  # min distance between decoded messages in a cluster
-min_samples = 500  # min number of pixels in a 256x256 image to form a cluster
-
-# Color map for visualization
-color_map = {
-    -1: [0, 0, 0],  # Black for -1
-    0: [255, 0, 255],  # ? for 0
-    1: [255, 0, 0],  # Red for 1
-    2: [0, 255, 0],  # Green for 2
-    3: [0, 0, 255],  # Blue for 3
-    4: [255, 255, 0],  # Yellow for 4
-}
-
-def load_wam():
-    # Load the model from the specified checkpoint
-    exp_dir = "checkpoints"
-    json_path = os.path.join(exp_dir, "params.json")
-    ckpt_path = os.path.join(exp_dir, 'checkpoint.pth')
-    wam = load_model_from_checkpoint(json_path, ckpt_path).to(device).eval()
-    return wam
-
-def image_detect(img_pil: Image.Image) -> (torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor):
-    img_pt = default_transform(img_pil).unsqueeze(0).to(device)  # [1, 3, H, W]
-
-    # Detect the watermark in the multi-watermarked image
-    preds = wam.detect(img_pt)["preds"]  # [1, 33, 256, 256]
-    mask_preds = F.sigmoid(preds[:, 0, :, :])  # [1, 256, 256], predicted mask
-    mask_preds_res = F.interpolate(mask_preds.unsqueeze(1), size=(img_pt.shape[-2], img_pt.shape[-1]), mode="bilinear", align_corners=False)  # [1, 1, H, W]
-    bit_preds = preds[:, 1:, :, :]  # [1, 32, 256, 256], predicted bits
-
-    # positions has the cluster number at each pixel. can be upsaled back to the original size.
-    try:
-        centroids, positions = multiwm_dbscan(bit_preds, mask_preds, epsilon=epsilon, min_samples=min_samples)
-        centroids_pt = torch.stack(list(centroids.values()))
-    except (UnboundLocalError) as e:
-        print(f"Error while detecting watermark: {e}")
-        positions = None
-        centroids = None
-        centroids_pt = None
-
-    return img_pt, (mask_preds_res>0.5).float(), positions, centroids, centroids_pt
-
-def image_embed(img_pil: Image.Image, wm_msgs: torch.Tensor, wm_masks: torch.Tensor) -> (torch.Tensor, torch.Tensor, torch.Tensor):
-    img_pt = default_transform(img_pil).unsqueeze(0).to(device)  # [1, 3, H, W]
-
-    # Embed the watermark message into the image
-    # Mask to use. 1 values correspond to pixels where the watermark will be placed.
-    multi_wm_img = img_pt.clone()
-    for ii in range(len(wm_msgs)):
-        wm_msg, mask = wm_msgs[ii].unsqueeze(0), wm_masks[ii]
-        outputs = wam.embed(img_pt, wm_msg)
-        multi_wm_img = outputs['imgs_w'] * mask + multi_wm_img * (1 - mask)
-
-    torch.cuda.empty_cache()
-    return img_pt, multi_wm_img, wm_masks.sum(0)
-
-def create_bounding_mask(img_size, boxes):
-    """Create a binary mask from bounding boxes.
-
-    Args:
-        img_size (tuple): Image size (height, width)
-        boxes (list): List of tuples (x1, y1, x2, y2) defining bounding boxes
-
-    Returns:
-        torch.Tensor: Binary mask tensor
-    """
-    mask = torch.zeros(img_size)
-    for x1, y1, x2, y2 in boxes:
-        mask[y1:y2, x1:x2] = 1
-    return mask
-
-def centroid_to_hex(centroid):
-    binary_int = 0
-    for bit in centroid:
-        binary_int = (binary_int << 1) | int(bit.item())
-    return format(binary_int, '08x')
-
-# Load the model
-wam = load_wam()
-
-def detect_watermark(image):
-    if image is None:
-        return None, None, None, {"status": "error", "messages": [], "error": "No image provided"}
-
-    img_pil = Image.fromarray(image).convert("RGB")
-    det_img, pred, positions, centroids, centroids_pt = image_detect(img_pil)
-
-    # Convert tensor images to numpy for display
-    detected_img = torch_to_np(det_img.detach())
-    pred_mask = torch_to_np(pred.detach().repeat(1, 3, 1, 1))
-
-    # Create cluster visualization
-    if positions is not None:
-        resize_ori = transforms.Resize(det_img.shape[-2:])
-        rgb_image = torch.zeros((3, positions.shape[-1], positions.shape[-2]), dtype=torch.uint8)
-        for value, color in color_map.items():
-            mask_ = positions == value
-            for channel, color_value in enumerate(color):
-                rgb_image[channel][mask_.squeeze()] = color_value
-        rgb_image = resize_ori(rgb_image.float()/255)
-        cluster_viz = rgb_image.permute(1, 2, 0).numpy()
-
-        # Create message output as JSON
-        messages = []
-        for key in centroids.keys():
-            centroid_hex = centroid_to_hex(centroids[key])
-            centroid_hex_array = "-".join([centroid_hex[i:i+4] for i in range(0, len(centroid_hex), 4)])
-            messages.append({
-                "id": int(key),
-                "message": centroid_hex_array,
-                "color": color_map[key]
-            })
-        message_json = {
-            "status": "success",
-            "messages": messages,
-            "count": len(messages)
-        }
-    else:
-        cluster_viz = np.zeros_like(detected_img)
-        message_json = {
-            "status": "no_detection",
-            "messages": [],
-            "count": 0
-        }
-
-    return pred_mask, cluster_viz, message_json
-
-def embed_watermark(image, wm_num, wm_type, wm_str, wm_loc):
-    if image is None:
-        return None, None, {
-            "status": "failure",
-            "messages": "No image provided"
-        }
-
-    if wm_type == "input":
-        if not re.match(r"^([0-9A-F]{4}-[0-9A-F]{4}-){%d}[0-9A-F]{4}-[0-9A-F]{4}$" % (wm_num-1), wm_str):
-            tip = "-".join([f"FFFF-{_}{_}{_}{_}" for _ in range(wm_num)])
-            return None, None, {
-                "status": "failure",
-                "messages": f"Invalid type input. Please use {tip}"
-            }
-
-    if wm_loc == "bounding":
-        if ROI_coordinates['clicks'] != wm_num * 2:
-            return None, None, {
-                "status": "failure",
-                "messages": "Invalid location input. Please draw at least %d bounding ROI" % (wm_num)
-            }
-
-    img_pil = Image.fromarray(image).convert("RGB")
-
-    # Generate watermark messages based on type
-    wm_msgs = []
-    if wm_type == "random":
-        chars = '-'.join(''.join(random.choice(string.hexdigits) for _ in range(4)) for _ in range(wm_num * 2))
-        wm_str = chars.lower()
-    wm_hex = wm_str.replace("-", "")
-    for i in range(0, len(wm_hex), 8):
-        chunk = wm_hex[i:i+8]
-        binary = bin(int(chunk, 16))[2:].zfill(32)
-        wm_msgs.append([int(b) for b in binary])
-    # Define a 32-bit message to be embedded into the images
-    wm_msgs = torch.tensor(wm_msgs, dtype=torch.float32).to(device)
-
-    # Create mask based on location type
-    wm_masks = None
-    if wm_loc == "random":
-        img_pt = default_transform(img_pil).unsqueeze(0).to(device)
-        # To ensure at least `proportion_masked %` of the width is randomly usable,
-        # otherwise, it is easy to enter an infinite loop and fail to find a usable width.
-        mask_percentage = img_pil.height / img_pil.width * proportion_masked / wm_num
-        wm_masks = create_random_mask(img_pt, num_masks=wm_num, mask_percentage=mask_percentage)
-    elif wm_loc == "bounding" and sections:
-        wm_masks = torch.zeros((len(sections), 1, img_pil.height, img_pil.width), dtype=torch.float32).to(device)
-        for idx, ((x_start, y_start, x_end, y_end), _) in enumerate(sections):
-            left = min(x_start, x_end)
-            right = max(x_start, x_end)
-            top = min(y_start, y_end)
-            bottom = max(y_start, y_end)
-            wm_masks[idx, 0, top:bottom, left:right] = 1
-
-
-    img_pt, embed_img_pt, embed_mask_pt = image_embed(img_pil, wm_msgs, wm_masks)
-
-    # Convert to numpy for display
-    img_np = torch_to_np(embed_img_pt.detach())
-    mask_np = torch_to_np(embed_mask_pt.detach().expand(3, -1, -1))
-    message_json = {
-        "status": "success",
-        "messages": wm_str
-    }
-    return img_np, mask_np, message_json
-
-
-
-# ROI means Region Of Interest. It is the region where the user clicks
-# to specify the location of the watermark.
-ROI_coordinates = {
-    'x_temp': 0,
-    'y_temp': 0,
-    'x_new': 0,
-    'y_new': 0,
-    'clicks': 0,
-}
-
-sections = []
-
-def get_select_coordinates(img, evt: gr.SelectData, num):
-    if ROI_coordinates['clicks'] >= num * 2:
-        gr.Warning(f"Cant add more than {num} of Watermarks.")
-        return (img, sections)
-
-    # update new coordinates
-    ROI_coordinates['clicks'] += 1
-    ROI_coordinates['x_temp'] = ROI_coordinates['x_new']
-    ROI_coordinates['y_temp'] = ROI_coordinates['y_new']
-    ROI_coordinates['x_new'] = evt.index[0]
-    ROI_coordinates['y_new'] = evt.index[1]
-    # compare start end coordinates
-    x_start = ROI_coordinates['x_new'] if (ROI_coordinates['x_new'] < ROI_coordinates['x_temp']) else ROI_coordinates['x_temp']
-    y_start = ROI_coordinates['y_new'] if (ROI_coordinates['y_new'] < ROI_coordinates['y_temp']) else ROI_coordinates['y_temp']
-    x_end = ROI_coordinates['x_new'] if (ROI_coordinates['x_new'] > ROI_coordinates['x_temp']) else ROI_coordinates['x_temp']
-    y_end = ROI_coordinates['y_new'] if (ROI_coordinates['y_new'] > ROI_coordinates['y_temp']) else ROI_coordinates['y_temp']
-    if ROI_coordinates['clicks'] % 2 == 0:
-        sections[len(sections) - 1] = ((x_start, y_start, x_end, y_end), f"Mask {len(sections)}")
-        # both start and end point get
-        return (img, sections)
-    else:
-        point_width = int(img.shape[0]*0.05)
-        sections.append(((ROI_coordinates['x_new'], ROI_coordinates['y_new'],
-                          ROI_coordinates['x_new'] + point_width, ROI_coordinates['y_new'] + point_width),
-                         f"Click second point for Mask {len(sections) + 1}"))
-        return (img, sections)
-
-def del_select_coordinates(img, evt: gr.SelectData):
-    del sections[evt.index]
-    # recreate section names
-    for i in range(len(sections)):
-        sections[i] = (sections[i][0], f"Mask {i + 1}")
-
-    # last section clicking second point not complete
-    if ROI_coordinates['clicks'] % 2 != 0:
-        if len(sections) == evt.index:
-            # delete last section
-            ROI_coordinates['clicks'] -= 1
-        else:
-            # recreate last section name for second point
-            ROI_coordinates['clicks'] -= 2
-            sections[len(sections) - 1] = (sections[len(sections) - 1][0], f"Click second point for Mask {len(sections) + 1}")
-    else:
-        ROI_coordinates['clicks'] -= 2
-
-    return (img[0], sections)
-
-with gr.Blocks(title="Watermark Anything Demo") as demo:
-    gr.Markdown("""
-    # Watermark Anything Demo
-    This app demonstrates watermark detection and embedding using the Watermark Anything model.
-    Find the project [here](https://github.com/facebookresearch/watermark-anything).
-    """)
-
-    with gr.Tabs():
-        with gr.TabItem("Embed Watermark"):
-            with gr.Row():
-                with gr.Column():
-                    embedding_img = gr.Image(label="Input Image", type="numpy")
-
-                with gr.Column():
-                    embedding_box = gr.AnnotatedImage(
-                        visible=False,
-                        label="ROI: Click on left 'Input Image'",
-                        color_map={
-                            "ROI of Watermark embedding": "#9987FF",
-                            "Click second point for ROI": "#f44336"}
-                    )
-
-                    embedding_num = gr.Slider(1, 5, value=1, step=1, label="Number of Watermarks")
-                    embedding_type = gr.Radio(["random", "input"], value="random", label="Type", info="Type of watermarks")
-                    embedding_str = gr.Textbox(label="Watermark Text", visible=False, show_copy_button=True)
-                    embedding_loc = gr.Radio(["random", "bounding"], value="random", label="Location", info="Location of watermarks")
-
-                    embedding_btn = gr.Button("Embed Watermark")
-                    marked_msg = gr.JSON(label="Marked Messages")
-            with gr.Row():
-                marked_image = gr.Image(label="Watermarked Image")
-                marked_mask = gr.Image(label="Position of the watermark")
-
-            embedding_img.select(
-                fn=get_select_coordinates,
-                inputs=[embedding_img, embedding_num],
-                outputs=embedding_box)
-            embedding_box.select(
-                fn=del_select_coordinates,
-                inputs=embedding_box,
-                outputs=embedding_box
-            )
-
-            # The inability to dynamically render `AnnotatedImage` is because,
-            # when placed inside `gr.Column()`, it prevents listeners from being added to controls outside the column.
-            # Dynamically adding a select listener will not change the cursor shape of the Image.
-            # So `render` cannot work properly in this scenario.
-            #
-            # @gr.render(inputs=embedding_loc)
-            # def show_split(wm_loc):
-            #     if wm_loc == "bounding":
-            #         embedding_img.select(
-            #             fn=get_select_coordinates,
-            #             inputs=[embedding_img, embedding_num],
-            #             outputs=embedding_box)
-            #         embedding_box.select(
-            #             fn=del_select_coordinates,
-            #             inputs=embedding_box,
-            #             outputs=embedding_box
-            #         )
-            #     else:
-            #         embedding_img.select()
-
-            def visible_box_image(img, wm_loc):
-                if wm_loc == "bounding":
-                    return gr.update(visible=True, value=(img, sections))
-                else:
-                    sections.clear()
-                    ROI_coordinates['clicks'] = 0
-                    return gr.update(visible=False, value=(img, sections))
-            embedding_loc.change(
-                fn=visible_box_image,
-                inputs=[embedding_img, embedding_loc],
-                outputs=[embedding_box]
-            )
-
-            def visible_text_label(embedding_type, embedding_num):
-                if embedding_type == "input":
-                    tip = "-".join([f"FFFF-{_}{_}{_}{_}" for _ in range(embedding_num)])
-                    return gr.update(visible=True, label=f"Watermark Text (Format: {tip})")
-                else:
-                    return gr.update(visible=False)
-
-            def check_embedding_str(embedding_str, embedding_num):
-                if not re.match(r"^([0-9A-F]{4}-[0-9A-F]{4}-){%d}[0-9A-F]{4}-[0-9A-F]{4}$" % (embedding_num-1), embedding_str):
-                    tip = "-".join([f"FFFF-{_}{_}{_}{_}" for _ in range(embedding_num)])
-                    gr.Warning(f"Invalid format. Please use {tip}", duration=0)
-                    return gr.update(interactive=False)
-                else:
-                    return gr.update(interactive=True)
-
-            embedding_num.change(
-                fn=visible_text_label,
-                inputs=[embedding_type, embedding_num],
-                outputs=[embedding_str]
-            )
-            embedding_type.change(
-                fn=visible_text_label,
-                inputs=[embedding_type, embedding_num],
-                outputs=[embedding_str]
-            )
-            embedding_str.change(
-                fn=check_embedding_str,
-                inputs=[embedding_str, embedding_num],
-                outputs=[embedding_btn]
-            )
-
-            embedding_btn.click(
-                fn=embed_watermark,
-                inputs=[embedding_img, embedding_num, embedding_type, embedding_str, embedding_loc],
-                outputs=[marked_image, marked_mask, marked_msg]
-            )
-
-        with gr.TabItem("Detect Watermark"):
-            with gr.Row():
-                with gr.Column():
-                    detecting_img = gr.Image(label="Input Image", type="numpy")
-                with gr.Column():
-                    detecting_btn = gr.Button("Detect Watermark")
-                    predicted_messages = gr.JSON(label="Detected Messages")
-            with gr.Row():
-                predicted_mask = gr.Image(label="Predicted Watermark Position")
-                predicted_cluster = gr.Image(label="Watermark Clusters")
-
-            detecting_btn.click(
-                fn=detect_watermark,
-                inputs=[detecting_img],
-                outputs=[predicted_mask, predicted_cluster, predicted_messages]
-            )
-
-demo.launch()
+# here the subprocess stops loading, because __name__ is NOT '__main__'
+# gradio will reload
+if '__main__' == __name__:
+
+    import gradio as gr
+
+    import os
+    import re
+    import string
+    import random
+    import torch
+    import torch.nn.functional as F
+    from torchvision import transforms
+    from PIL import Image
+
+    from watermark_anything.data.metrics import msg_predict_inference
+    from notebooks.inference_utils import (
+        load_model_from_checkpoint,
+        default_transform,
+        create_random_mask,
+        torch_to_np
+    )
+
+    import time
+    from multiwm import dbscan
+
+    max_timeout = int(os.environ.get("MAX_TIMEOUT", 60))
+
+    # Device configuration
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+    # Seed
+    seed = 42
+    torch.manual_seed(seed)
+
+    # Constants
+    proportion_masked = 0.5  # Proportion of image to be watermarked
+    epsilon = 1  # min distance between decoded messages in a cluster
+    min_samples = 500  # min number of pixels in a 256x256 image to form a cluster
+
+    # Color map for visualization
+    color_map = {
+        -1: [0, 0, 0],  # Black for -1
+        0: [255, 0, 255],  # ? for 0
+        1: [255, 0, 0],  # Red for 1
+        2: [0, 255, 0],  # Green for 2
+        3: [0, 0, 255],  # Blue for 3
+        4: [255, 255, 0],  # Yellow for 4
+        5: [0, 255, 255],  # ?
+    }
+
+    def load_wam():
+        # Load the model from the specified checkpoint
+        exp_dir = "checkpoints"
+        json_path = os.path.join(exp_dir, "params.json")
+        ckpt_path = os.path.join(exp_dir, 'checkpoint.pth')
+        wam = load_model_from_checkpoint(json_path, ckpt_path).to(device).eval()
+        return wam
+
+    def image_detect(img_pil: Image.Image, scan_mult: bool=False, timeout_seconds: int=5) -> (torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor):
+        img_pt = default_transform(img_pil).unsqueeze(0).to(device)  # [1, 3, H, W]
+
+        # Detect the watermark in the multi-watermarked image
+        preds = wam.detect(img_pt)["preds"]  # [1, 33, 256, 256]
+        mask_preds = F.sigmoid(preds[:, 0, :, :])  # [1, 256, 256], predicted mask
+        bit_preds = preds[:, 1:, :, :]  # [1, 32, 256, 256], predicted bits
+
+        mask_preds_res = F.interpolate(mask_preds.unsqueeze(1), size=(img_pt.shape[-2], img_pt.shape[-1]), mode="bilinear", align_corners=False)  # [1, 1, H, W]
+        message_pred_inf = msg_predict_inference(bit_preds, mask_preds).cpu().float()  # [1, 32]
+        if message_pred_inf.sum() == 0:
+            message_pred_inf = None
+
+        centroids, positions = None, None
+        if(scan_mult):
+            try:
+                centroids, positions = dbscan(bit_preds, mask_preds, epsilon, min_samples, dec_timeout=timeout_seconds)
+            except TimeoutError:
+                print("Timeout error in multiwm task!")
+            except (UnboundLocalError) as e:
+                print(f"Error while detecting watermark: {e}")
+
+        return img_pt, (mask_preds_res>0.5).float(), message_pred_inf, positions, centroids
+
+    def image_embed(img_pil: Image.Image, wm_msgs: torch.Tensor, wm_masks: torch.Tensor) -> (torch.Tensor, torch.Tensor, torch.Tensor):
+        img_pt = default_transform(img_pil).unsqueeze(0).to(device)  # [1, 3, H, W]
+
+        # Embed the watermark message into the image
+        # Mask to use. 1 values correspond to pixels where the watermark will be placed.
+        multi_wm_img = img_pt.clone()
+        for ii in range(len(wm_msgs)):
+            wm_msg, mask = wm_msgs[ii].unsqueeze(0), wm_masks[ii]
+            outputs = wam.embed(img_pt, wm_msg)
+            multi_wm_img = outputs['imgs_w'] * mask + multi_wm_img * (1 - mask)
+
+        return img_pt, multi_wm_img, wm_masks.sum(0)
+
+    def create_bounding_mask(img_size, boxes):
+        """Create a binary mask from bounding boxes.
+
+        Args:
+            img_size (tuple): Image size (height, width)
+            boxes (list): List of tuples (x1, y1, x2, y2) defining bounding boxes
+
+        Returns:
+            torch.Tensor: Binary mask tensor
+        """
+        mask = torch.zeros(img_size)
+        for x1, y1, x2, y2 in boxes:
+            mask[y1:y2, x1:x2] = 1
+        return mask
+
+    def centroid_to_hex(centroid):
+        binary_int = 0
+        for bit in centroid:
+            binary_int = (binary_int << 1) | int(bit.item())
+        return format(binary_int, '08x')
+
+    # Load the model
+    wam = load_wam()
+
+    def detect_watermark(image, multi, timeout):
+        if image is None:
+            return None, None, None, {"status": "error", "messages": [], "error": "No image provided"}
+
+        start_time = time.time()
+
+        img_pil = Image.fromarray(image).convert("RGB")
+        det_img, mask_preds_res, message_pred_inf, positions, centroids = image_detect(img_pil, multi, timeout)
+
+        # Convert tensor images to numpy for display
+        pred_mask = torch_to_np(mask_preds_res.detach().repeat(1, 3, 1, 1))
+
+        cluster_viz = None
+        message_json = {
+            "status": "none-detected"
+        }
+
+        if message_pred_inf is not None:
+            cluster_viz = pred_mask
+
+            centroid_hex = centroid_to_hex(message_pred_inf[0])
+            centroid_hex_array = "-".join([centroid_hex[i:i+4] for i in range(0, len(centroid_hex), 4)])
+            message_json['status'] = "one-detected"
+            message_json['message'] = centroid_hex_array
+
+        # Create cluster visualization
+        if positions is not None:
+            resize_ori = transforms.Resize(det_img.shape[-2:])
+            rgb_image = torch.zeros((3, positions.shape[-1], positions.shape[-2]), dtype=torch.uint8)
+            for value, color in color_map.items():
+                mask_ = positions == value
+                for channel, color_value in enumerate(color):
+                    rgb_image[channel][mask_.squeeze()] = color_value
+            rgb_image = resize_ori(rgb_image.float()/255)
+            cluster_viz = rgb_image.permute(1, 2, 0).numpy()
+
+            # Create message output as JSON
+            messages = []
+            for key in centroids.keys():
+                centroid_hex = centroid_to_hex(centroids[key])
+                centroid_hex_array = "-".join([centroid_hex[i:i+4] for i in range(0, len(centroid_hex), 4)])
+                messages.append({
+                    "id": int(key),
+                    "message": centroid_hex_array,
+                    "color": color_map[key]
+                })
+            message_json['status'] = "multi-detected"
+            message_json['cluster'] = messages
+
+        run_time = time.time() - start_time
+        message_json['run_time'] = run_time
+
+        color_md = []
+        if "cluster" in message_json:
+            for item in message_json["cluster"]:
+                key = item["id"]
+                msg = item["message"]
+                color_md.append(f'<code style="color:rgb{tuple(color_map[key])}">{msg}</code>')
+
+        return pred_mask, cluster_viz, message_json, "\n".join(color_md)
+
+    def embed_watermark(image, wm_num, wm_type, wm_str, wm_loc):
+        if image is None:
+            return None, None, {
+                "status": "failure",
+                "messages": "No image provided"
+            }
+
+        if wm_type == "input":
+            if not re.match(r"^([0-9A-F]{4}-[0-9A-F]{4}-){%d}[0-9A-F]{4}-[0-9A-F]{4}$" % (wm_num-1), wm_str):
+                tip = "-".join([f"FFFF-{_}{_}{_}{_}" for _ in range(wm_num)])
+                return None, None, {
+                    "status": "failure",
+                    "messages": f"Invalid type input. Please use {tip}"
+                }
+
+        if wm_loc == "bounding":
+            if ROI_coordinates['clicks'] != wm_num * 2:
+                return None, None, {
+                    "status": "failure",
+                    "messages": "Invalid location input. Please draw at least %d bounding ROI" % (wm_num)
+                }
+
+        img_pil = Image.fromarray(image).convert("RGB")
+
+        # Generate watermark messages based on type
+        wm_msgs = []
+        if wm_type == "random":
+            chars = '-'.join(''.join(random.choice(string.hexdigits) for _ in range(4)) for _ in range(wm_num * 2))
+            wm_str = chars.lower()
+        wm_hex = wm_str.replace("-", "")
+        for i in range(0, len(wm_hex), 8):
+            chunk = wm_hex[i:i+8]
+            binary = bin(int(chunk, 16))[2:].zfill(32)
+            wm_msgs.append([int(b) for b in binary])
+        # Define a 32-bit message to be embedded into the images
+        wm_msgs = torch.tensor(wm_msgs, dtype=torch.float32).to(device)
+
+        # Create mask based on location type
+        wm_masks = None
+        if wm_loc == "random":
+            img_pt = default_transform(img_pil).unsqueeze(0).to(device)
+            # To ensure at least `proportion_masked %` of the width is randomly usable,
+            # otherwise, it is easy to enter an infinite loop and fail to find a usable width.
+            mask_percentage = min(img_pil.height, img_pil.width) / max(img_pil.height, img_pil.width) * proportion_masked / wm_num
+            wm_masks = create_random_mask(img_pt, num_masks=wm_num, mask_percentage=mask_percentage)
+        elif wm_loc == "bounding" and sections:
+            wm_masks = torch.zeros((len(sections), 1, img_pil.height, img_pil.width), dtype=torch.float32).to(device)
+            for idx, ((x_start, y_start, x_end, y_end), _) in enumerate(sections):
+                left = min(x_start, x_end)
+                right = max(x_start, x_end)
+                top = min(y_start, y_end)
+                bottom = max(y_start, y_end)
+                wm_masks[idx, 0, top:bottom, left:right] = 1
+
+
+        img_pt, embed_img_pt, embed_mask_pt = image_embed(img_pil, wm_msgs, wm_masks)
+
+        # Convert to numpy for display
+        img_np = torch_to_np(embed_img_pt.detach())
+        mask_np = torch_to_np(embed_mask_pt.detach().expand(3, -1, -1))
+        message_json = {
+            "status": "success",
+            "messages": wm_str
+        }
+        return img_np, mask_np, message_json
+
+
+
+    # ROI means Region Of Interest. It is the region where the user clicks
+    # to specify the location of the watermark.
+    ROI_coordinates = {
+        'x_temp': 0,
+        'y_temp': 0,
+        'x_new': 0,
+        'y_new': 0,
+        'clicks': 0,
+    }
+
+    sections = []
+
+    def get_select_coordinates(img, evt: gr.SelectData, num):
+        if ROI_coordinates['clicks'] >= num * 2:
+            gr.Warning(f"Cant add more than {num} of Watermarks.")
+            return (img, sections)
+
+        # update new coordinates
+        ROI_coordinates['clicks'] += 1
+        ROI_coordinates['x_temp'] = ROI_coordinates['x_new']
+        ROI_coordinates['y_temp'] = ROI_coordinates['y_new']
+        ROI_coordinates['x_new'] = evt.index[0]
+        ROI_coordinates['y_new'] = evt.index[1]
+        # compare start end coordinates
+        x_start = ROI_coordinates['x_new'] if (ROI_coordinates['x_new'] < ROI_coordinates['x_temp']) else ROI_coordinates['x_temp']
+        y_start = ROI_coordinates['y_new'] if (ROI_coordinates['y_new'] < ROI_coordinates['y_temp']) else ROI_coordinates['y_temp']
+        x_end = ROI_coordinates['x_new'] if (ROI_coordinates['x_new'] > ROI_coordinates['x_temp']) else ROI_coordinates['x_temp']
+        y_end = ROI_coordinates['y_new'] if (ROI_coordinates['y_new'] > ROI_coordinates['y_temp']) else ROI_coordinates['y_temp']
+        if ROI_coordinates['clicks'] % 2 == 0:
+            sections[len(sections) - 1] = ((x_start, y_start, x_end, y_end), f"Mask {len(sections)}")
+            # both start and end point get
+            return (img, sections)
+        else:
+            point_width = int(img.shape[0]*0.05)
+            sections.append(((ROI_coordinates['x_new'], ROI_coordinates['y_new'],
+                              ROI_coordinates['x_new'] + point_width, ROI_coordinates['y_new'] + point_width),
+                             f"Click second point for Mask {len(sections) + 1}"))
+            return (img, sections)
+
+    def del_select_coordinates(img, evt: gr.SelectData):
+        del sections[evt.index]
+        # recreate section names
+        for i in range(len(sections)):
+            sections[i] = (sections[i][0], f"Mask {i + 1}")
+
+        # last section clicking second point not complete
+        if ROI_coordinates['clicks'] % 2 != 0:
+            if len(sections) == evt.index:
+                # delete last section
+                ROI_coordinates['clicks'] -= 1
+            else:
+                # recreate last section name for second point
+                ROI_coordinates['clicks'] -= 2
+                sections[len(sections) - 1] = (sections[len(sections) - 1][0], f"Click second point for Mask {len(sections) + 1}")
+        else:
+            ROI_coordinates['clicks'] -= 2
+
+        return (img[0], sections)
+
+    with gr.Blocks(title="Watermark Anything Demo") as demo:
+        gr.Markdown("""
+        # Watermark Anything Demo
+        This app demonstrates watermark detection and embedding using the Watermark Anything model.
+        Find the project [here](https://github.com/facebookresearch/watermark-anything).
+        """)
+
+        with gr.Tabs():
+            with gr.TabItem("Embed Watermark"):
+                with gr.Row():
+                    with gr.Column():
+                        embedding_img = gr.Image(label="Input Image", type="numpy")
+
+                    with gr.Column():
+                        embedding_box = gr.AnnotatedImage(
+                            visible=False,
+                            label="ROI: Click on left 'Input Image'",
+                            color_map={
+                                "ROI of Watermark embedding": "#9987FF",
+                                "Click second point for ROI": "#f44336"}
+                        )
+
+                        embedding_num = gr.Slider(1, 5, value=1, step=1, label="Number of Watermarks")
+                        embedding_type = gr.Radio(["random", "input"], value="random", label="Type", info="Type of watermarks")
+                        embedding_str = gr.Textbox(label="Watermark Text", visible=False, show_copy_button=True)
+                        embedding_loc = gr.Radio(["random", "bounding"], value="random", label="Location", info="Location of watermarks")
+
+                        embedding_btn = gr.Button("Embed Watermark")
+                        marked_msg = gr.JSON(label="Marked Messages")
+                with gr.Row():
+                    marked_image = gr.Image(label="Watermarked Image")
+                    marked_mask = gr.Image(label="Position of the watermark")
+
+                embedding_img.select(
+                    fn=get_select_coordinates,
+                    inputs=[embedding_img, embedding_num],
+                    outputs=embedding_box)
+                embedding_box.select(
+                    fn=del_select_coordinates,
+                    inputs=embedding_box,
+                    outputs=embedding_box
+                )
+
+                # The inability to dynamically render `AnnotatedImage` is because,
+                # when placed inside `gr.Column()`, it prevents listeners from being added to controls outside the column.
+                # Dynamically adding a select listener will not change the cursor shape of the Image.
+                # So `render` cannot work properly in this scenario.
+                #
+                # @gr.render(inputs=embedding_loc)
+                # def show_split(wm_loc):
+                #     if wm_loc == "bounding":
+                #         embedding_img.select(
+                #             fn=get_select_coordinates,
+                #             inputs=[embedding_img, embedding_num],
+                #             outputs=embedding_box)
+                #         embedding_box.select(
+                #             fn=del_select_coordinates,
+                #             inputs=embedding_box,
+                #             outputs=embedding_box
+                #         )
+                #     else:
+                #         embedding_img.select()
+
+                def visible_box_image(img, wm_loc):
+                    if wm_loc == "bounding":
+                        return gr.update(visible=True, value=(img, sections))
+                    else:
+                        sections.clear()
+                        ROI_coordinates['clicks'] = 0
+                        return gr.update(visible=False, value=(img, sections))
+                embedding_loc.change(
+                    fn=visible_box_image,
+                    inputs=[embedding_img, embedding_loc],
+                    outputs=[embedding_box]
+                )
+
+                def visible_text_label(embedding_type, embedding_num):
+                    if embedding_type == "input":
+                        tip = "-".join([f"FFFF-{_}{_}{_}{_}" for _ in range(embedding_num)])
+                        return gr.update(visible=True, label=f"Watermark Text (Format: {tip})")
+                    else:
+                        return gr.update(visible=False)
+
+                def check_embedding_str(embedding_str, embedding_num):
+                    if not re.match(r"^([0-9A-F]{4}-[0-9A-F]{4}-){%d}[0-9A-F]{4}-[0-9A-F]{4}$" % (embedding_num-1), embedding_str):
+                        tip = "-".join([f"FFFF-{_}{_}{_}{_}" for _ in range(embedding_num)])
+                        gr.Warning(f"Invalid format. Please use {tip}", duration=0)
+                        return gr.update(interactive=False)
+                    else:
+                        return gr.update(interactive=True)
+
+                embedding_num.change(
+                    fn=visible_text_label,
+                    inputs=[embedding_type, embedding_num],
+                    outputs=[embedding_str]
+                )
+                embedding_type.change(
+                    fn=visible_text_label,
+                    inputs=[embedding_type, embedding_num],
+                    outputs=[embedding_str]
+                )
+                embedding_str.change(
+                    fn=check_embedding_str,
+                    inputs=[embedding_str, embedding_num],
+                    outputs=[embedding_btn]
+                )
+
+                embedding_btn.click(
+                    fn=embed_watermark,
+                    inputs=[embedding_img, embedding_num, embedding_type, embedding_str, embedding_loc],
+                    outputs=[marked_image, marked_mask, marked_msg]
+                )
+
+            with gr.TabItem("Detect Watermark"):
+                with gr.Row():
+                    with gr.Column():
+                        detecting_img = gr.Image(label="Input Image", type="numpy", height=512)
+                    with gr.Column():
+                        tip_md = gr.Markdown("""
+                        **Note:** The split operation might not yield any results,
+                        and subprocesses will be used to support timeout.
+
+                        On the Windows platform, creating subprocesses will be noticeably slower.
+                        """)
+                        multi_ckb = gr.Checkbox(label="Split into multiple", value=False)
+                        timeout_sli = gr.Slider(1, max_timeout, value=30, step=1, label="Timeout of multiple", visible=False)
+                        detecting_btn = gr.Button("Detect Watermark")
+                        predicted_messages = gr.JSON(label="Detected Messages")
+                        color_cluster = gr.Markdown()
+                with gr.Row():
+                    predicted_mask = gr.Image(label="Predicted Watermark Position")
+                    predicted_cluster = gr.Image(label="Watermark Clusters")
+
+                detecting_img.change(
+                    fn=lambda x: gr.update(value=False),
+                    inputs=detecting_img,
+                    outputs=multi_ckb
+                )
+                multi_ckb.change(
+                    fn=lambda x: gr.update(visible=x),
+                    inputs=multi_ckb,
+                    outputs=timeout_sli
+                )
+                detecting_btn.click(
+                    fn=detect_watermark,
+                    inputs=[detecting_img, multi_ckb, timeout_sli],
+                    outputs=[predicted_mask, predicted_cluster, predicted_messages, color_cluster]
+                )
+
+
+if __name__ == '__main__':
+    demo.launch()
```
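Note on the new guard: with `use_signals=False`, `wrapt-timeout-decorator` runs the decorated call in a separate process, and that child process re-imports the main module. Wrapping the whole app in `if '__main__' == __name__:` keeps the child from re-loading the model and re-building the Gradio UI. A minimal sketch of the mechanism, with a hypothetical `heavy_setup` standing in for the model load and UI construction (not part of this commit):

```python
# Sketch only: why top-level work must sit behind a __main__ check when
# a library spawns worker processes that re-import this module.

def heavy_setup():
    # stands in for load_wam() and the gr.Blocks() construction
    print("loading checkpoints, building UI ...")

if __name__ == '__main__':
    heavy_setup()  # runs once, in the parent process
    # a spawned worker re-imports this file under a different __name__
    # (e.g. '__mp_main__'), skips this block, and stays lightweight
```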
multiwm.py ADDED

```diff
@@ -0,0 +1,10 @@
+from wrapt_timeout_decorator import *
+
+from notebooks.inference_utils import (
+    multiwm_dbscan
+)
+
+@timeout(60, use_signals=False)
+def dbscan(bit_preds, mask_preds, epsilon, min_samples, **kwargs):
+    print("multiwm task started.")
+    return multiwm_dbscan(preds=bit_preds, masks=mask_preds, epsilon=epsilon, min_samples=min_samples)
```
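For reference, a hedged sketch of how `gradio_app.py` drives this wrapper. `wrapt-timeout-decorator` treats `dec_timeout` as a reserved call-time argument that overrides the hardcoded 60-second default and raises `TimeoutError` when the worker process exceeds it. The tensor shapes mirror the comments in `image_detect`; the random values are illustrative only:

```python
import torch
from multiwm import dbscan

# Illustrative inputs: [1, 32, 256, 256] bit predictions and a
# [1, 256, 256] mask probability map, as documented in image_detect
bit_preds = torch.rand(1, 32, 256, 256)
mask_preds = torch.rand(1, 256, 256)

try:
    # dec_timeout overrides the decorator's 60 s default for this one call
    centroids, positions = dbscan(bit_preds, mask_preds,
                                  epsilon=1, min_samples=500,
                                  dec_timeout=5)
except TimeoutError:
    # the worker process was terminated after 5 seconds
    centroids, positions = None, None
```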
requirements.txt CHANGED

```diff
@@ -1,4 +1,5 @@
 torch==2.5.1
 GitPython==3.1.43
 gradio==5.8.0
-huggingface-hub==0.26.3
+huggingface-hub==0.26.3
+wrapt-timeout-decorator==1.5.1
```