2up1down committed on
Commit
b0d6593
β€’
1 Parent(s): c9cf959

Upload 2 files

Browse files

add error message & example0

Files changed (2) hide show
  1. app.py +20 -11
  2. example0.jpg +0 -0
app.py CHANGED
@@ -4,6 +4,7 @@ import os
4
  from ultralytics import YOLO
5
 
6
  from google.cloud import vision
 
7
  _api_key = os.environ["API_KEY"]
8
  _project_id = os.environ["PROJECT_ID"]
9
  client = vision.ImageAnnotatorClient(client_options={"quota_project_id": _project_id, "api_key": _api_key})
@@ -29,7 +30,7 @@ keypointModel = r'keypoints-best.pt'
29
  minSz = 1280
30
 
31
 
32
- _examples = [["example1.jpg",True], ["example2.jpg",False], ["example3.jpg",True]]
33
 
34
 
35
  def unwarp_image(warped_image, src_points, dst_points, output_width, output_height):
@@ -64,7 +65,7 @@ def get_corners(results:list, img):
64
  planars = []
65
  kps = []
66
  for kpco in r.keypoints.xy.cpu():#.squeeze()
67
- assert len(kpco)>0, "not found"
68
  keypoints = {k:v.numpy() for v,k in zip(kpco,KP)}
69
  sz = model1DIM
70
  dstCorners = np.array([(0,0),(sz,0),(sz,sz),(0,sz)])
@@ -102,14 +103,17 @@ def preprocessImg(planar):
102
 
103
 
104
  def get_keypoints(results:list):
105
- assert len(results) ==1, "found multiple dials. expected only 1"
 
106
  r = results[0]
107
  # ordering
108
  kp = "start_kp center end_kp tip".split()
109
  kpco = r.keypoints.xy.cpu().squeeze()
110
  keypoints = {k:v.numpy() for v,k in zip(kpco,kp)}
111
- assert len(keypoints["center"])==2, "center keypoint not found"
112
- assert len(keypoints["tip"])==2, "tip keypoint not found"
 
 
113
  return keypoints
114
 
115
  def cosangle(a,b, ignoreRot=False):
@@ -188,7 +192,7 @@ def result_as_validvalue(contents:list[dict])->tuple[list[dict], list[str]]:
188
  valid.append({"text":f["text"], "value": value, "mid": m, "apchar":a, "box":b})
189
 
190
  valid.sort(key=lambda e: e["value"])
191
- return valid, other
192
 
193
 
194
  distance = lambda a,b : np.sqrt(np.square(np.array(a)-np.array(b)).sum())
@@ -344,10 +348,11 @@ def get_needle_value(img, keypoints):
344
  contents = get_text_from_image(client, img)
345
  toc = time()
346
  print(f"ocr took: {toc-tic2:.1g}")
347
-
348
- assert len(contents)
349
  valid,other = result_as_validvalue(contents)
350
- assert len(valid)
 
351
 
352
  valid.append({"text":"tip", "mid":keypoints["tip"]})
353
  ix,an = sort_clockwise_with_start([e["mid"] for e in valid],*keypoints["center"], 0)
@@ -376,7 +381,8 @@ def get_needle_value(img, keypoints):
376
 
377
  center = np.array(keypoints["center"])
378
  values, rate = determine_ocr_neighbors(keypoints, valid, nearestIx)
379
- assert len(values)>=2, "failed to find at least 2 OCR values"
 
380
 
381
  # import pandas as pd
382
  # print(pd.DataFrame.from_dict(values))
@@ -484,13 +490,15 @@ def predict(img, detect_gauge_first):
484
  def test(img, detect_gauge_first):
485
  return {"msg":str(img.size), "other": detect_gauge_first}
486
 
 
487
  description = r"""
488
  <b>Official πŸ€— Gradio demo</b> for <a href='https://synanthropic.com/reading-analog-gauge' target='_blank'><b>Reading Analog Gauges: Automate Gauge Readings with AI in Days, Not Months
489
  </b></a>.<br>
490
  <br>
491
  This model reads analog dial gauge by detecting, applying perspective correction, and gauge reading.
492
  <br>
493
- The model was build <i><strong>only</strong></i> with synthetic data.<br>
 
494
  <br>
495
  You can read more about it [here](https://synanthropic.com/reading-analog-gauge).
496
  <br>
@@ -511,5 +519,6 @@ gr.Interface(title="Reading Analog Gauges",
511
  ],
512
  outputs="json",
513
  examples=_examples,
 
514
  cache_examples=True)\
515
  .launch()
 
4
  from ultralytics import YOLO
5
 
6
  from google.cloud import vision
7
+
8
  _api_key = os.environ["API_KEY"]
9
  _project_id = os.environ["PROJECT_ID"]
10
  client = vision.ImageAnnotatorClient(client_options={"quota_project_id": _project_id, "api_key": _api_key})
 
30
  minSz = 1280
31
 
32
 
33
+ _examples = [["example0.jpg", True],["example1.jpg",True], ["example2.jpg",False], ["example3.jpg",True]]
34
 
35
 
36
  def unwarp_image(warped_image, src_points, dst_points, output_width, output_height):
 
65
  planars = []
66
  kps = []
67
  for kpco in r.keypoints.xy.cpu():#.squeeze()
68
+ # assert len(kpco)>0, "not found"
69
  keypoints = {k:v.numpy() for v,k in zip(kpco,KP)}
70
  sz = model1DIM
71
  dstCorners = np.array([(0,0),(sz,0),(sz,sz),(0,sz)])
 
103
 
104
 
105
def get_keypoints(results: list):
    """Extract the named dial keypoints from a single YOLO pose result.

    Parameters
    ----------
    results : list
        YOLO inference output; exactly one detected dial is expected.

    Returns
    -------
    dict
        Mapping keypoint name -> numpy (x, y) coordinate for the names
        "start_kp", "center", "end_kp", "tip".

    Raises
    ------
    gr.Error
        If the number of detections is not exactly 1, or if the "center"
        or "tip" keypoint is missing/malformed.
    """
    if len(results) != 1:
        raise gr.Error("found multiple dials. expected only 1")
    r = results[0]
    # ordering: must match the keypoint layout used at training time
    kp = "start_kp center end_kp tip".split()
    kpco = r.keypoints.xy.cpu().squeeze()
    keypoints = {k: v.numpy() for v, k in zip(kpco, kp)}
    # zip() truncates when fewer than 4 keypoints were predicted, so a key
    # may be absent entirely — use .get() so the user sees the friendly
    # gr.Error instead of a bare KeyError.
    center = keypoints.get("center")
    if center is None or len(center) != 2:
        raise gr.Error("center keypoint not found")
    tip = keypoints.get("tip")
    if tip is None or len(tip) != 2:
        raise gr.Error("tip keypoint not found")
    return keypoints
118
 
119
  def cosangle(a,b, ignoreRot=False):
 
192
  valid.append({"text":f["text"], "value": value, "mid": m, "apchar":a, "box":b})
193
 
194
  valid.sort(key=lambda e: e["value"])
195
+ return valid, list(set(other))
196
 
197
 
198
def distance(a, b):
    """Euclidean distance between two points given as array-likes."""
    delta = np.array(a) - np.array(b)
    return np.sqrt(np.square(delta).sum())
 
348
  contents = get_text_from_image(client, img)
349
  toc = time()
350
  print(f"ocr took: {toc-tic2:.1g}")
351
+ if 0==len(contents):
352
+ raise gr.Error("failed to get any text/number")
353
  valid,other = result_as_validvalue(contents)
354
+ if 0==len(valid):
355
+ raise gr.Error("failed to get any number")
356
 
357
  valid.append({"text":"tip", "mid":keypoints["tip"]})
358
  ix,an = sort_clockwise_with_start([e["mid"] for e in valid],*keypoints["center"], 0)
 
381
 
382
  center = np.array(keypoints["center"])
383
  values, rate = determine_ocr_neighbors(keypoints, valid, nearestIx)
384
+ if len(values)<2:
385
+ raise gr.Error("failed to find at least 2 OCR number values")
386
 
387
  # import pandas as pd
388
  # print(pd.DataFrame.from_dict(values))
 
490
  def test(img, detect_gauge_first):
491
  return {"msg":str(img.size), "other": detect_gauge_first}
492
 
493
+
494
  description = r"""
495
  <b>Official πŸ€— Gradio demo</b> for <a href='https://synanthropic.com/reading-analog-gauge' target='_blank'><b>Reading Analog Gauges: Automate Gauge Readings with AI in Days, Not Months
496
  </b></a>.<br>
497
  <br>
498
  This model reads analog dial gauge by detecting, applying perspective correction, and gauge reading.
499
  <br>
500
+ The model was built <i><strong>only</strong></i> with synthetic data (e.g. examples).<br>
501
+ Hence, it <i>probably</i> will not work on significantly different images - give it a try. Let us know, so we can keep improving.<br>
502
  <br>
503
  You can read more about it [here](https://synanthropic.com/reading-analog-gauge).
504
  <br>
 
519
  ],
520
  outputs="json",
521
  examples=_examples,
522
+ allow_flagging="never",
523
  cache_examples=True)\
524
  .launch()
example0.jpg ADDED