mawady committed
Commit 9b1b4e2 · 1 Parent(s): e56299c
Files changed (1)
  1. app.py +21 -64
app.py CHANGED
@@ -16,87 +16,44 @@ from mmocr.apis import MMOCRInferencer
  ocr = MMOCRInferencer(det='TextSnake', rec='ABINet_Vision')

  url = (
-     "https://upload.wikimedia.org/wikipedia/commons/3/38/Adorable-animal-cat-20787.jpg"
  )
- path_input = "./cat.jpg"
  urllib.request.urlretrieve(url, filename=path_input)

- url = "https://upload.wikimedia.org/wikipedia/commons/4/43/Cute_dog.jpg"
- path_input = "./dog.jpg"
  urllib.request.urlretrieve(url, filename=path_input)

- # model = keras_model(weights="imagenet")

- # n_steps = 50
- # method = "gausslegendre"
- # internal_batch_size = 50
- # ig = IntegratedGradients(
- # model, n_steps=n_steps, method=method, internal_batch_size=internal_batch_size
- # )


  def do_process(img):
-     return img
-     # instance = image.img_to_array(img)
-     # instance = np.expand_dims(instance, axis=0)
-     # instance = preprocess_input(instance)
-     # preds = model.predict(instance)
-     # lstPreds = decode_predictions(preds, top=3)[0]
-     # dctPreds = {
-     # lstPreds[i][1]: round(float(lstPreds[i][2]), 2) for i in range(len(lstPreds))
-     # }
-     # predictions = preds.argmax(axis=1)
-     # if baseline == "white":
-     # baselines = bls = np.ones(instance.shape).astype(instance.dtype)
-     # img_flt = Image.fromarray(np.uint8(np.squeeze(baselines) * 255))
-     # elif baseline == "black":
-     # baselines = bls = np.zeros(instance.shape).astype(instance.dtype)
-     # img_flt = Image.fromarray(np.uint8(np.squeeze(baselines) * 255))
-     # elif baseline == "blur":
-     # img_flt = img.filter(ImageFilter.GaussianBlur(5))
-     # baselines = image.img_to_array(img_flt)
-     # baselines = np.expand_dims(baselines, axis=0)
-     # baselines = preprocess_input(baselines)
-     # else:
-     # baselines = np.random.random_sample(instance.shape).astype(instance.dtype)
-     # img_flt = Image.fromarray(np.uint8(np.squeeze(baselines) * 255))
-     # explanation = ig.explain(instance, baselines=baselines, target=predictions)
-     # attrs = explanation.attributions[0]
-     # fig, ax = visualize_image_attr(
-     # attr=attrs.squeeze(),
-     # original_image=img,
-     # method="blended_heat_map",
-     # sign="all",
-     # show_colorbar=True,
-     # title=baseline,
-     # plt_fig_axis=None,
-     # use_pyplot=False,
-     # )
-     # fig.tight_layout()
-     # buf = io.BytesIO()
-     # fig.savefig(buf)
-     # buf.seek(0)
-     # img_res = Image.open(buf)
-     # return img_res, img_flt, dctPreds
-

  input_im = gr.inputs.Image(
-     shape=(224, 224), image_mode="RGB", invert_colors=False, source="upload", type="pil"
  )
- # input_drop = gr.inputs.Dropdown(
- # label="Baseline (default: random)",
- # choices=["random", "black", "white", "blur"],
- # default="random",
- # type="value",
- # )

  output_img = gr.outputs.Image(label="Output of Integrated Gradients", type="pil")
  # output_base = gr.outputs.Image(label="Baseline image", type="pil")
  # output_label = gr.outputs.Label(label="Classification results", num_top_classes=3)

- title = "XAI - Integrated gradients"
- description = "Playground: Integrated gradients for a ResNet model trained on Imagenet dataset. Tools: Alibi, TF, Gradio."
- examples = [["./cat.jpg"], ["./dog.jpg"]]
  article = "<p style='text-align: center'><a href='https://github.com/mawady' target='_blank'>By Dr. Mohamed Elawady</a></p>"
  iface = gr.Interface(
      fn=do_process,
 
  ocr = MMOCRInferencer(det='TextSnake', rec='ABINet_Vision')

  url = (
+     "https://upload.wikimedia.org/wikipedia/commons/thumb/5/5b/Draft_Marks_on_the_Bow_of_Kruzenshtern_Port_of_Tallinn_16_July_2011.jpg/1600px-Draft_Marks_on_the_Bow_of_Kruzenshtern_Port_of_Tallinn_16_July_2011.jpg"
  )
+ path_input = "./example1.jpg"
  urllib.request.urlretrieve(url, filename=path_input)

+ url = "https://upload.wikimedia.org/wikipedia/commons/3/3e/733_how-deep.jpg"
+ path_input = "./example2.jpg"
  urllib.request.urlretrieve(url, filename=path_input)


+ path_img_output_folder = "./demo-out"
+ if not os.path.exists(path_img_output_folder):
+     os.makedirs(path_img_output_folder)

+ path_img_input_folder = "./demo-input"
+ if not os.path.exists(path_img_input_folder):
+     os.makedirs(path_img_input_folder)

  def do_process(img):
+     img_name = 'tmp.jpg'
+     path_input = os.path.join(path_img_input_folder, img_name)
+     img.save(path_input)
+     path_output = os.path.join(path_img_output_folder, 'vis', img_name)
+     result = ocr(path_input, out_dir=path_img_output_folder, save_vis=True)
+     img_res = Image.open(path_output)
+     return img_res

  input_im = gr.inputs.Image(
+     shape=None, image_mode="RGB", invert_colors=False, source="upload", type="pil"
  )

  output_img = gr.outputs.Image(label="Output of Integrated Gradients", type="pil")
  # output_base = gr.outputs.Image(label="Baseline image", type="pil")
  # output_label = gr.outputs.Label(label="Classification results", num_top_classes=3)

+ title = "Reading draught marks"
+ description = "Playground: Reading draught marks using pre-trained models. Tools: MMOCR, Gradio."
+ examples = [["./example1.jpg"], ["./example2.jpg"]]
  article = "<p style='text-align: center'><a href='https://github.com/mawady' target='_blank'>By Dr. Mohamed Elawady</a></p>"
  iface = gr.Interface(
      fn=do_process,
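
For reference, a minimal standalone sketch of the round-trip that the new do_process performs, runnable outside Gradio. It assumes the MMOCR 1.x MMOCRInferencer API imported at the top of app.py, that save_vis=True writes the rendered result under <out_dir>/vis/ using the input file name, and that the prediction dict exposes a rec_texts key; none of these details are asserted by the commit itself, so verify them against the installed MMOCR version.

import os
from PIL import Image
from mmocr.apis import MMOCRInferencer

ocr = MMOCRInferencer(det='TextSnake', rec='ABINet_Vision')

path_img_input_folder = "./demo-input"
path_img_output_folder = "./demo-out"
os.makedirs(path_img_input_folder, exist_ok=True)
os.makedirs(path_img_output_folder, exist_ok=True)

# Save the (PIL) input image, run detection + recognition, then load the
# visualization that MMOCR writes under <out_dir>/vis/ when save_vis=True.
img = Image.open("./example1.jpg").convert("RGB")
img_name = "tmp.jpg"
path_input = os.path.join(path_img_input_folder, img_name)
img.save(path_input)

result = ocr(path_input, out_dir=path_img_output_folder, save_vis=True)
print(result["predictions"][0]["rec_texts"])  # recognized strings (MMOCR 1.x layout)

img_res = Image.open(os.path.join(path_img_output_folder, "vis", img_name))
img_res.show()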
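
The hunk ends inside the gr.Interface(...) call, so the remaining arguments are not part of this diff. A plausible completion, assuming only the legacy gr.inputs/gr.outputs objects and variables defined above, could look like this; it is illustrative, not the Space's actual code.

iface = gr.Interface(
    fn=do_process,
    inputs=input_im,          # illustrative wiring; the real call is truncated in this diff
    outputs=output_img,
    title=title,
    description=description,
    article=article,
    examples=examples,
)

iface.launch()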