npc0 committed
Commit: 45a05cd
1 Parent(s): bc1fb97

Update app.py

Files changed (1): app.py (+24 -26)
app.py CHANGED
@@ -1,6 +1,4 @@
 import os
-# os.system('wget https://huggingface.co/spaces/An-619/FastSAM/resolve/main/weights/FastSAM.pt')
-
 import yolov5
 
 # load model
@@ -41,7 +39,7 @@ def read_license_number(img):
             for bbox in boxes]
 
 from transformers import CLIPProcessor, CLIPModel
-model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
+vit_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
 processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
 
 def zero_shot_classification(image, labels):
@@ -49,7 +47,7 @@ def zero_shot_classification(image, labels):
                        images=image,
                        return_tensors="pt",
                        padding=True)
-    outputs = model(**inputs)
+    outputs = vit_model(**inputs)
     logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
     return logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
 
@@ -75,33 +73,34 @@ def check_solarplant_broken(image):
     idx = probs.argmax().item()
     return zero_shot_class_labels[idx].split(" ")[1-idx]
 
-# from fastsam import FastSAM, FastSAMPrompt
+from fastsam import FastSAM, FastSAMPrompt
+os.system('wget https://huggingface.co/spaces/An-619/FastSAM/resolve/main/weights/FastSAM.pt')
 
-# model = FastSAM('./FastSAM.pt')
-# DEVICE = 'cpu'
-# def segment_solar_panel(img):
-#     # os.system('python Inference.py --model_path FastSAM.pt --img_path bus.jpg --text_prompt "solar panel grids"')
-#     img = img.convert("RGB")
+model = FastSAM('./FastSAM.pt')
+DEVICE = 'cpu'
+def segment_solar_panel(img):
+    # os.system('python Inference.py --model_path FastSAM.pt --img_path bus.jpg --text_prompt "solar panel grids"')
+    img = img.convert("RGB")
 
-#     everything_results = model(img, device=DEVICE, retina_masks=True, imgsz=1024, conf=0.4, iou=0.9,)
-#     prompt_process = FastSAMPrompt(img, everything_results, device=DEVICE)
+    everything_results = model(img, device=DEVICE, retina_masks=True, imgsz=1024, conf=0.4, iou=0.9,)
+    prompt_process = FastSAMPrompt(img, everything_results, device=DEVICE)
 
-#     # everything prompt
-#     ann = prompt_process.everything_prompt()
+    # everything prompt
+    ann = prompt_process.everything_prompt()
 
-#     # bbox default shape [0,0,0,0] -> [x1,y1,x2,y2]
-#     ann = prompt_process.box_prompt(bbox=[[200, 200, 300, 300]])
+    # bbox default shape [0,0,0,0] -> [x1,y1,x2,y2]
+    ann = prompt_process.box_prompt(bbox=[[200, 200, 300, 300]])
 
-#     # text prompt
-#     ann = prompt_process.text_prompt(text='solar panel grids')
+    # text prompt
+    ann = prompt_process.text_prompt(text='solar panel grids')
 
-#     # point prompt
-#     # points default [[0,0]] [[x1,y1],[x2,y2]]
-#     # point_label default [0] [1,0] 0:background, 1:foreground
-#     ann = prompt_process.point_prompt(points=[[620, 360]], pointlabel=[1])
+    # point prompt
+    # points default [[0,0]] [[x1,y1],[x2,y2]]
+    # point_label default [0] [1,0] 0:background, 1:foreground
+    ann = prompt_process.point_prompt(points=[[620, 360]], pointlabel=[1])
 
-#     prompt_process.plot(annotations=ann,output_path='./bus.jpg',)
-#     return Image.Open('./bus.jpg')
+    prompt_process.plot(annotations=ann,output_path='./bus.jpg',)
+    return Image.Open('./bus.jpg')
 
 
 import gradio as gr
@@ -109,8 +108,7 @@ import gradio as gr
 def greet(img):
     lns = read_license_number(img)
     if len(lns):
-        # seg = segment_solar_panel(img)
-        seg = img
+        seg = segment_solar_panel(img)
         return (seg,
                 "車牌: " + '; '.join(lns) + "\n\n" \
                 + "類型: "+ check_solarplant_installed_by_image(img, True) + "\n\n" \
 