blakeleo committed on
Commit
0e475fc
1 Parent(s): 569dd3b

Delete app.py

Files changed (1)
  1. app.py +0 -146
app.py DELETED
@@ -1,146 +0,0 @@
- from fastai.vision.all import *
- from io import BytesIO
- import requests
- import streamlit as st
-
- import numpy as np
- import torch
- import time
- import cv2
- from numpy import random
- from models.experimental import attempt_load
- from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
-     scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
- from utils.plots import plot_one_box
-
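- # models.experimental and the utils.* helpers come from the YOLOv7 repository
- # (https://github.com/WongKinYiu/yolov7); this app assumes they are vendored
- # alongside app.py.
-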
- def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
-     # Resize and pad image while meeting stride-multiple constraints
-     shape = img.shape[:2]  # current shape [height, width]
-     if isinstance(new_shape, int):
-         new_shape = (new_shape, new_shape)
-
-     # Scale ratio (new / old)
-     r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
-     if not scaleup:  # only scale down, do not scale up (for better test mAP)
-         r = min(r, 1.0)
-
-     # Compute padding
-     ratio = r, r  # width, height ratios
-     new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
-     dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
-     if auto:  # minimum rectangle
-         dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
-     elif scaleFill:  # stretch
-         dw, dh = 0.0, 0.0
-         new_unpad = (new_shape[1], new_shape[0])
-         ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios
-
-     dw /= 2  # divide padding into 2 sides
-     dh /= 2
-
-     if shape[::-1] != new_unpad:  # resize
-         img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
-     top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
-     left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
-     img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
-     return img, ratio, (dw, dh)
-
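- # For example: a 720x1280 (h x w) BGR frame with new_shape=640, auto=True,
- # stride=32 is scaled by r=0.5 to 640x360 (w x h), then padded to 640x384,
- # 384 being the nearest stride-32 multiple, so the aspect ratio is preserved
- # with minimal padding.
-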
- def detect_modify(img0, model, conf=0.4, imgsz=640, conf_thres=0.25, iou_thres=0.45):
-     st.image(img0, caption="Your image", use_column_width=True)
-
-     stride = int(model.stride.max())  # model stride
-     imgsz = check_img_size(imgsz, s=stride)  # check img_size
-
-     # Padded resize
-     img0 = cv2.cvtColor(np.asarray(img0), cv2.COLOR_RGB2BGR)
-     img = letterbox(img0, imgsz, stride=stride)[0]
-     # Convert
-     img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to CHW
-     img = np.ascontiguousarray(img)
-
-     # Get names and colors
-     names = model.module.names if hasattr(model, 'module') else model.names
-     colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
-
-     # Run inference
-     old_img_w = old_img_h = imgsz
-     old_img_b = 1
-
-     t0 = time.time()
-     img = torch.from_numpy(img).to(device)
-     img = img / 255.0  # promote uint8 to float32 and scale 0-255 to 0.0-1.0
-     if img.ndimension() == 3:
-         img = img.unsqueeze(0)  # add batch dimension
-
-     # Inference
-     with torch.no_grad():  # calculating gradients would cause a GPU memory leak
-         pred = model(img)[0]
-
-     # Apply NMS
-     pred = non_max_suppression(pred, conf_thres, iou_thres)
-
-     # Process detections (single image, so only pred[0])
-     gn = torch.tensor(img0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
-
-     det = pred[0]
-     if len(det):
-         # Rescale boxes from img_size to im0 size
-         det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()
-
-         # Print results
-         s = ''
-         for c in det[:, -1].unique():
-             n = (det[:, -1] == c).sum()  # detections per class
-             s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
-
-         # Write results
-         for *xyxy, conf, cls in reversed(det):
-             label = f'{names[int(cls)]} {conf:.2f}'
-             plot_one_box(xyxy, img0, label=label, color=colors[int(cls)], line_thickness=1)
-
-     st.markdown("### Prediction result:")
-     img0 = cv2.cvtColor(np.asarray(img0), cv2.COLOR_BGR2RGB)
-     st.image(img0, caption="Prediction Result", use_column_width=True)
-
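- # Pipeline recap for detect_modify: PIL image (RGB) -> BGR ndarray ->
- # letterbox -> contiguous CHW float tensor in [0, 1] -> model forward pass ->
- # NMS -> boxes rescaled to the original frame -> drawn with plot_one_box.
-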
- # Set parameters
- weight_path = './resnet34_stage-l.pkl'
- imgsz = 640
- conf = 0.4
- conf_thres = 0.25
- iou_thres = 0.45
- device = torch.device("cpu")
- path = "./"
-
- # Load model
- model = attempt_load(weight_path, map_location=torch.device('cpu'))  # load FP32 model
-
- """
- # YOLOv7
- This is an object detection model for [Objects].
- """
- option = st.radio("", ["Upload Image", "Image URL"])
-
- if option == "Upload Image":
-     uploaded_file = st.file_uploader("Please upload an image.")
-
-     if uploaded_file is not None:
-         img = PILImage.create(uploaded_file)
-         detect_modify(img, model, conf=conf, imgsz=imgsz, conf_thres=conf_thres, iou_thres=iou_thres)
- else:
-     url = st.text_input("Please input a URL.")
-     if url != "":
-         try:
-             response = requests.get(url)
-             pil_img = PILImage.create(BytesIO(response.content))
-             detect_modify(pil_img, model, conf=conf, imgsz=imgsz, conf_thres=conf_thres, iou_thres=iou_thres)
-         except Exception:
-             st.text(f"Problem reading image from {url}")