saritha5 committed on
Commit
fb00789
1 Parent(s): 14d2eec

Create app.py

Files changed (1)
  1. app.py +141 -0
app.py ADDED
@@ -0,0 +1,141 @@
+ import time
+ 
+ import numpy as np
+ import simplejson
+ import streamlit as st
+ import tensorflow as tf
+ from matplotlib import pyplot as plt
+ from numpy import asarray
+ from PIL import Image
+ 
+ import label_map_util
+ import visualization_utils as vis_util
+ 
+ st.title("Tag_Diciphering")
+ 
+ 
+ def prediction():
+     total_time_start = time.time()
+ 
+     def loadImageIntoNumpyArray(image):
+         # Convert a PIL image to a (height, width, 3) uint8 NumPy array.
+         (im_width, im_height) = image.size
+         if image.mode == "RGBA":
+             image = image.convert('RGB')
+         return asarray(image).reshape((im_height, im_width, 3)).astype(np.uint8)
+ 
+     def main(image_path, model_path, model_PATH_TO_CKPT, path_to_labels):
+         image = Image.open(image_path)
+         image_np = loadImageIntoNumpyArray(image)
+         image_np_expanded = np.expand_dims(image_np, axis=0)
+ 
+         # Load the label map and build the category index.
+         label_map = label_map_util.load_labelmap(path_to_labels)
+         categories = label_map_util.convert_label_map_to_categories(
+             label_map, max_num_classes=100, use_display_name=True)
+         category_index = label_map_util.create_category_index(categories)
+ 
+         # Load the frozen detection graph and open a session on it.
+         detection_graph = tf.Graph()
+         with detection_graph.as_default():
+             od_graph_def = tf.compat.v1.GraphDef()
+             with tf.io.gfile.GFile(model_PATH_TO_CKPT, 'rb') as fid:
+                 serialized_graph = fid.read()
+                 od_graph_def.ParseFromString(serialized_graph)
+                 tf.import_graph_def(od_graph_def, name='')
+             sess = tf.compat.v1.Session(graph=detection_graph)
+ 
+         # Input tensor is the image.
+         image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
+         # Output tensors are the detection boxes, scores, and classes.
+         # Each box represents a part of the image where a particular object was detected.
+         detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
+         # Each score represents the level of confidence for each of the objects.
+         # The score is shown on the result image, together with the class label.
+         detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
+         detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
+         # Number of objects detected.
+         num_detections = detection_graph.get_tensor_by_name('num_detections:0')
+ 
+         (boxes, scores, classes, num) = sess.run(
+             [detection_boxes, detection_scores, detection_classes, num_detections],
+             feed_dict={image_tensor: image_np_expanded})
+ 
+         vis_util.visualize_boxes_and_labels_on_image_array(
+             image_np,
+             np.squeeze(boxes),
+             np.squeeze(classes).astype(np.int32),
+             np.squeeze(scores),
+             category_index,
+             use_normalized_coordinates=True,
+             line_thickness=8,
+             min_score_thresh=0.1)
+ 
+         # Collect detections above the threshold as {"class", "score", "box"} dicts,
+         # e.g. {'class': 'Y', 'score': 99.97, 'box': (157, 191, 269, 288)}.
+         objects = []
+         threshold = 0.5
+         boxes = boxes[0]
+         for index, value in enumerate(classes[0]):
+             object_dict = {}
+             if scores[0, index] > threshold:
+                 object_dict["class"] = category_index.get(value).get('name')
+                 object_dict["score"] = round(scores[0, index] * 100, 2)
+                 box = tuple(boxes[index].tolist())
+                 ymin, xmin, ymax, xmax = box
+                 im_width, im_height = 360, 360
+                 left, right, top, bottom = (xmin * im_width, xmax * im_width,
+                                             ymin * im_height, ymax * im_height)
+                 object_dict["box"] = (int(left), int(right), int(top), int(bottom))
+                 objects.append(object_dict)
+ 
+         # Show the original image and the annotated image side by side.
+         image_orignal = Image.open(image_path)
+         image_np_orignal = loadImageIntoNumpyArray(image_orignal)
+ 
+         fig, ax = plt.subplots(1, 2)
+         fig.suptitle('Tag Deciphering')
+         ax[0].imshow(image_np_orignal, aspect='auto')
+         ax[1].imshow(image_np, aspect='auto')
+         st.pyplot(fig)
+ 
+         return objects
+ 
+     images = ["img1.jpg", "img2.jpg", "img3.jpg", "img4.jpg"]
+     with st.sidebar:
+         st.write("Choose an image")
+         st.image(images)
+ 
+     file = st.file_uploader('Upload an Image', type=["jpeg", "jpg", "png"])
+ 
+     if file is None:
+         st.write("Please upload an image file")
+     else:
+         image = Image.open(file)
+         st.image(image, use_column_width=True)
+ 
+         image_path = file
+         model_path = "//inference"
+         model_PATH_TO_CKPT = "frozen_inference_graph.pb"
+         path_to_labels = "tf_label_map.pbtxt"
+ 
+         result = main(image_path, model_path, model_PATH_TO_CKPT, path_to_labels)
+         st.write(result)
+ 
+         # Read the characters top to bottom (sorted by each box's bottom edge)
+         # and keep only confident detections.
+         newlist = sorted(result, key=lambda k: k['box'][3], reverse=False)
+ 
+         text = ''
+         for each in newlist:
+             if each['score'] > 65:
+                 text += each['class']
+ 
+         if text != "":
+             text = text.replace("yellowTag", "")
+             result = text
+         else:
+             result = "No Vertical Tag Detected"
+ 
+         response = {"predictions": [result]}
+         total_time_end = time.time()
+         print("total time : ", round((total_time_end - total_time_start), 2))
+         st.write(str(simplejson.dumps(response)))
+ 
+ 
+ prediction()