npc0 committed on
Commit
7df070b
1 Parent(s): 69157ce

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +54 -54
app.py CHANGED
@@ -1,42 +1,42 @@
1
  import os
2
- import yolov5
3
-
4
- # load model
5
- model = yolov5.load('keremberke/yolov5m-license-plate')
6
-
7
- # set model parameters
8
- model.conf = 0.5 # NMS confidence threshold
9
- model.iou = 0.25 # NMS IoU threshold
10
- model.agnostic = False # NMS class-agnostic
11
- model.multi_label = False # NMS multiple labels per box
12
- model.max_det = 1000 # maximum number of detections per image
13
-
14
- # set image
15
- def license_plate_detect(img):
16
- # perform inference
17
- results = model(img, size=640)
18
 
19
- # inference with test time augmentation
20
- results = model(img, augment=True)
21
 
22
- # parse results
23
- if len(results.pred):
24
- predictions = results.pred[0]
25
- boxes = predictions[:, :4] # x1, y1, x2, y2
26
- scores = predictions[:, 4]
27
- categories = predictions[:, 5]
28
- return boxes
29
-
30
- from PIL import Image
31
- # image = Image.open(img)
32
- import pytesseract
33
-
34
- def read_license_number(img):
35
- boxes = license_plate_detect(img)
36
- if boxes:
37
- return [pytesseract.image_to_string(
38
- image.crop(bbox.tolist()))
39
- for bbox in boxes]
40
 
41
  from transformers import CLIPProcessor, CLIPModel
42
  vit_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
@@ -51,12 +51,12 @@ def zero_shot_classification(image, labels):
51
  logits_per_image = outputs.logits_per_image # this is the image-text similarity score
52
  return logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
53
 
54
- installed_list = []
55
- # image = Image.open(requests.get(url, stream=True).raw)
56
- def check_solarplant_installed_by_license(license_number_list):
57
- if len(installed_list):
58
- return [license_number in installed_list
59
- for license_number in license_number_list]
60
 
61
  def check_solarplant_installed_by_image(image, output_label=False):
62
  zero_shot_class_labels = ["bus with solar panel grids",
@@ -66,12 +66,12 @@ def check_solarplant_installed_by_image(image, output_label=False):
66
  return zero_shot_class_labels[probs.argmax().item()]
67
  return probs.argmax().item() == 0
68
 
69
- def check_solarplant_broken(image):
70
- zero_shot_class_labels = ["white broken solar panel",
71
- "normal black solar panel grids"]
72
- probs = zero_shot_classification(image, zero_shot_class_labels)
73
- idx = probs.argmax().item()
74
- return zero_shot_class_labels[idx].split(" ")[1-idx]
75
 
76
  from fastsam import FastSAM, FastSAMPrompt
77
  os.system('wget https://huggingface.co/spaces/An-619/FastSAM/resolve/main/weights/FastSAM.pt')
@@ -106,13 +106,13 @@ def segment_solar_panel(img):
106
  import gradio as gr
107
 
108
  def greet(img):
109
- lns = read_license_number(img)
110
- if len(lns):
111
  seg = segment_solar_panel(img)
112
- return (seg,
113
- "車牌: " + '; '.join(lns) + "\n\n" \
114
- + "類型: "+ check_solarplant_installed_by_image(img, True) + "\n\n" \
115
- + "狀態:" + check_solarplant_broken(img))
 
116
  return (img, "空地��。。")
117
 
118
  iface = gr.Interface(fn=greet, inputs="image", outputs=["image", "text"])
 
1
  import os
2
+ # import yolov5
3
+
4
+ # # load model
5
+ # model = yolov5.load('keremberke/yolov5m-license-plate')
6
+
7
+ # # set model parameters
8
+ # model.conf = 0.5 # NMS confidence threshold
9
+ # model.iou = 0.25 # NMS IoU threshold
10
+ # model.agnostic = False # NMS class-agnostic
11
+ # model.multi_label = False # NMS multiple labels per box
12
+ # model.max_det = 1000 # maximum number of detections per image
13
+
14
+ # # set image
15
+ # def license_plate_detect(img):
16
+ # # perform inference
17
+ # results = model(img, size=640)
18
 
19
+ # # inference with test time augmentation
20
+ # results = model(img, augment=True)
21
 
22
+ # # parse results
23
+ # if len(results.pred):
24
+ # predictions = results.pred[0]
25
+ # boxes = predictions[:, :4] # x1, y1, x2, y2
26
+ # scores = predictions[:, 4]
27
+ # categories = predictions[:, 5]
28
+ # return boxes
29
+
30
+ # from PIL import Image
31
+ # # image = Image.open(img)
32
+ # import pytesseract
33
+
34
+ # def read_license_number(img):
35
+ # boxes = license_plate_detect(img)
36
+ # if boxes:
37
+ # return [pytesseract.image_to_string(
38
+ # image.crop(bbox.tolist()))
39
+ # for bbox in boxes]
40
 
41
  from transformers import CLIPProcessor, CLIPModel
42
  vit_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
 
51
  logits_per_image = outputs.logits_per_image # this is the image-text similarity score
52
  return logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
53
 
54
+ # installed_list = []
55
+ # # image = Image.open(requests.get(url, stream=True).raw)
56
+ # def check_solarplant_installed_by_license(license_number_list):
57
+ # if len(installed_list):
58
+ # return [license_number in installed_list
59
+ # for license_number in license_number_list]
60
 
61
  def check_solarplant_installed_by_image(image, output_label=False):
62
  zero_shot_class_labels = ["bus with solar panel grids",
 
66
  return zero_shot_class_labels[probs.argmax().item()]
67
  return probs.argmax().item() == 0
68
 
69
+ # def check_solarplant_broken(image):
70
+ # zero_shot_class_labels = ["white broken solar panel",
71
+ # "normal black solar panel grids"]
72
+ # probs = zero_shot_classification(image, zero_shot_class_labels)
73
+ # idx = probs.argmax().item()
74
+ # return zero_shot_class_labels[idx].split(" ")[1-idx]
75
 
76
  from fastsam import FastSAM, FastSAMPrompt
77
  os.system('wget https://huggingface.co/spaces/An-619/FastSAM/resolve/main/weights/FastSAM.pt')
 
106
  import gradio as gr
107
 
108
  def greet(img):
109
+ if check_solarplant_installed_by_image(img):
 
110
  seg = segment_solar_panel(img)
111
+ return (seg, '')
112
+ # return (seg,
113
+ # "車牌: " + '; '.join(lns) + "\n\n" \
114
+ # + "類型: "+ check_solarplant_installed_by_image(img, True) + "\n\n" \
115
+ # + "狀態:" + check_solarplant_broken(img))
116
  return (img, "空地��。。")
117
 
118
  iface = gr.Interface(fn=greet, inputs="image", outputs=["image", "text"])