Nicolas Burrus committed on
Commit
fcb1411
β€’
1 Parent(s): 75bdb59

First working version.

Browse files
app.py CHANGED
@@ -14,7 +14,7 @@ model = None
14
 
15
  def load_model():
16
  global model
17
- model = torch.jit.load("model.pt")
18
 
19
  def denormalize_and_clip_as_tensor (im: Tensor) -> Tensor:
20
  return torch.clip(im * 0.5 + 0.5, 0.0, 1.0)
@@ -23,17 +23,43 @@ def denormalize_and_clip_as_numpy (im: Tensor) -> np.ndarray:
23
  im = im.squeeze(0)
24
  return np.ascontiguousarray(denormalize_and_clip_as_tensor(im).permute(1,2,0).detach().cpu().numpy())
25
 
 
 
 
 
 
 
 
 
 
 
 
 
26
  def undo_antialiasing(im):
27
  im_torch = torch.from_numpy (im).permute(2,0,1).unsqueeze(0).float() / 255.0
28
  im_torch = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(im_torch)
 
 
 
 
 
29
  with torch.no_grad():
30
  output_torch = model(im_torch)
31
  output = denormalize_and_clip_as_numpy(output_torch.rgb)
32
- return (output*255.99).astype(np.uint8)
 
 
33
 
34
  load_model()
35
  iface = gr.Interface(fn=undo_antialiasing,
36
  inputs=gr.inputs.Image(),
37
  outputs=gr.outputs.Image(),
38
- examples=[['examples/Bowling.png'], ['examples/opencv.png']])
 
 
 
 
 
 
 
39
  iface.launch()
 
14
 
15
  def load_model():
16
  global model
17
+ model = torch.jit.load("v4_gated_unet-rn18-rn18_mse_bn32_5e-3_1e-5.pt")
18
 
19
  def denormalize_and_clip_as_tensor (im: Tensor) -> Tensor:
20
  return torch.clip(im * 0.5 + 0.5, 0.0, 1.0)
 
23
  im = im.squeeze(0)
24
  return np.ascontiguousarray(denormalize_and_clip_as_tensor(im).permute(1,2,0).detach().cpu().numpy())
25
 
26
+ # pad image to a multiple of 64
27
+ def pad_image(im, multiple=64):
28
+ # B,C,H,W
29
+ rows = im.shape[2]
30
+ cols = im.shape[3]
31
+ if rows % multiple == 0 and cols % multiple == 0:
32
+ return im
33
+ else:
34
+ rows_to_pad = multiple - (rows % multiple)
35
+ cols_to_pad = multiple - (cols % multiple)
36
+ return transforms.Pad(padding=(0, 0, cols_to_pad, rows_to_pad), padding_mode='reflect')(im)
37
+
38
  def undo_antialiasing(im):
39
  im_torch = torch.from_numpy (im).permute(2,0,1).unsqueeze(0).float() / 255.0
40
  im_torch = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(im_torch)
41
+
42
+ initial_rows = im.shape[0]
43
+ initial_cols = im.shape[1]
44
+ im_torch = pad_image(im_torch)
45
+
46
  with torch.no_grad():
47
  output_torch = model(im_torch)
48
  output = denormalize_and_clip_as_numpy(output_torch.rgb)
49
+ output = (output*255.99).astype(np.uint8)
50
+ output = output[:initial_rows, :initial_cols, :]
51
+ return output
52
 
53
  load_model()
54
  iface = gr.Interface(fn=undo_antialiasing,
55
  inputs=gr.inputs.Image(),
56
  outputs=gr.outputs.Image(),
57
+ examples=[
58
+ ['examples/opencv.png'],
59
+ ['examples/matplotlib.png'],
60
+ ['examples/coco_beach.png'],
61
+ ['examples/plot-bowling.png']])
62
+ # FIXME: add OpenCV-background
63
+ # FIXME: add some arXiv test image
64
+
65
  iface.launch()
examples/coco_beach.png ADDED

Git LFS Details

  • SHA256: 42f1ba281d976e47bafaad7e08a42008bcbe45f1b63350e22b664e96be4fbe8e
  • Pointer size: 131 Bytes
  • Size of remote file: 408 kB
examples/matplotlib.png ADDED

Git LFS Details

  • SHA256: 9351dc20a89f45cdae795f32bff66b1de205bdc71c5d6af24191237aca3017d4
  • Pointer size: 129 Bytes
  • Size of remote file: 9.48 kB
examples/{Bowling.png β†’ plot-bowling.png} RENAMED
File without changes
v4_gated_unet-rn18-rn18_mse_bn32_5e-3_1e-5.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b6e2d3cad8875825f30420111462a97de4f2f91c642f27aabbeec5abaad4590d
3
+ size 118285630