z-uo committed
Commit e708547
1 Parent(s): 1016041
Files changed (2)
  1. .gitignore +3 -0
  2. app.py +26 -6
.gitignore ADDED
@@ -0,0 +1,3 @@
+__pycache__
+*.pyc
+venv
app.py CHANGED
@@ -16,7 +16,6 @@ from models.misc import nested_tensor_from_tensor_list
 model = create_letr()
 
 # PREPARE PREPROCESSING
-test_size = 1100
 # transform_test = transforms.Compose([
 #     transforms.Resize((test_size)),
 #     transforms.ToTensor(),
@@ -25,16 +24,31 @@ test_size = 1100
 normalize = Compose([
     ToTensor(),
     Normalize([0.538, 0.494, 0.453], [0.257, 0.263, 0.273]),
-    Resize([test_size]),
+    Resize([256]),
+])
+normalize_512 = Compose([
+    ToTensor(),
+    Normalize([0.538, 0.494, 0.453], [0.257, 0.263, 0.273]),
+    Resize([512]),
+])
+normalize_1100 = Compose([
+    ToTensor(),
+    Normalize([0.538, 0.494, 0.453], [0.257, 0.263, 0.273]),
+    Resize([1100]),
 ])
 
 
-def predict(inp):
+def predict(inp, size):
     image = Image.fromarray(inp.astype('uint8'), 'RGB')
     h, w = image.height, image.width
     orig_size = torch.as_tensor([int(h), int(w)])
 
-    img = normalize(image)
+    if size == '1100':
+        img = normalize_1100(image)
+    elif size == '512':
+        img = normalize_512(image)
+    else:
+        img = normalize(image)
     inputs = nested_tensor_from_tensor_list([img])
 
     with torch.no_grad():
@@ -45,13 +59,19 @@ def predict(inp):
     return image
 
 
-inputs = gr.inputs.Image()
+inputs = [
+    gr.inputs.Image(),
+    gr.inputs.Radio(["256", "512", "1100"]),
+]
 outputs = gr.outputs.Image()
 gr.Interface(
     fn=predict,
     inputs=inputs,
     outputs=outputs,
-    examples=["demo.png", "tappeto-per-calibrazione.jpg"],
+    examples=[
+        ["demo.png", '256'],
+        ["tappeto-per-calibrazione.jpg", '256']
+    ],
     title="LETR",
     description="Model for line detection..."
 ).launch()
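
Note: the three Compose pipelines added in this commit differ only in their Resize target, so the same preprocessing could also be expressed once as a lookup keyed by the strings the gr.inputs.Radio component returns. The sketch below is illustrative only and is not part of the commit: it uses torchvision.transforms, whereas app.py may import its own Compose/ToTensor/Normalize/Resize, and the preprocess helper is a hypothetical name.

    from torchvision.transforms import Compose, Normalize, Resize, ToTensor

    MEAN = [0.538, 0.494, 0.453]
    STD = [0.257, 0.263, 0.273]

    # One pipeline per Radio choice; keys match the strings the Radio component returns.
    TRANSFORMS = {
        size: Compose([
            ToTensor(),
            Normalize(MEAN, STD),
            Resize([int(size)]),
        ])
        for size in ("256", "512", "1100")
    }

    def preprocess(image, size):
        # Unknown or missing sizes fall back to the 256 pipeline, mirroring
        # the else branch in the committed predict().
        return TRANSFORMS.get(size, TRANSFORMS["256"])(image)

With a helper like this, predict(inp, size) would reduce to img = preprocess(image, size) followed by the existing nested_tensor_from_tensor_list call, instead of branching on the size string.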