Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -8,7 +8,7 @@ import pickle
|
|
8 |
# function which is returning the number of object detected
|
9 |
def number_object_detected(image):
|
10 |
|
11 |
-
custom_model = YOLO('best.pt') # custome yolo model path
|
12 |
results = custom_model(image,verbose= False)
|
13 |
|
14 |
dic = results[0].names
|
@@ -20,7 +20,8 @@ def number_object_detected(image):
|
|
20 |
for e , count in zip(unique_elements,counts):
|
21 |
a = dic[e]
|
22 |
class_count[a] = count
|
23 |
-
|
|
|
24 |
|
25 |
|
26 |
def car_detection_and_Cropping(image_path):
|
@@ -46,28 +47,88 @@ def car_detection_and_Cropping(image_path):
|
|
46 |
# Load the image using OpenCV
|
47 |
image = cv2.imread(image_path)
|
48 |
|
49 |
-
|
50 |
# Crop the image
|
51 |
crop_image = image[boxes[max_index][1]:boxes[max_index][3], boxes[max_index][0]:boxes[max_index][2]]
|
52 |
|
53 |
# passing the crop image to the detection model
|
54 |
|
55 |
-
class_c ,
|
56 |
else:
|
57 |
-
class_c ,
|
58 |
return class_c ,result
|
59 |
|
60 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
61 |
|
|
|
|
|
|
|
|
|
|
|
|
|
62 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
63 |
|
64 |
|
65 |
## loading the model
|
66 |
-
def process_data(
|
67 |
-
|
|
|
|
|
|
|
68 |
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
73 |
interface.launch()
|
|
|
8 |
# function which is returning the number of object detected
|
9 |
def number_object_detected(image):
|
10 |
|
11 |
+
custom_model = YOLO('runs/detect/train4/weights/best.pt') # custome yolo model path
|
12 |
results = custom_model(image,verbose= False)
|
13 |
|
14 |
dic = results[0].names
|
|
|
20 |
for e , count in zip(unique_elements,counts):
|
21 |
a = dic[e]
|
22 |
class_count[a] = count
|
23 |
+
print(class_count)
|
24 |
+
return (class_count,results )
|
25 |
|
26 |
|
27 |
def car_detection_and_Cropping(image_path):
|
|
|
47 |
# Load the image using OpenCV
|
48 |
image = cv2.imread(image_path)
|
49 |
|
|
|
50 |
# Crop the image
|
51 |
crop_image = image[boxes[max_index][1]:boxes[max_index][3], boxes[max_index][0]:boxes[max_index][2]]
|
52 |
|
53 |
# passing the crop image to the detection model
|
54 |
|
55 |
+
class_c ,result = number_object_detected(crop_image)
|
56 |
else:
|
57 |
+
class_c ,result= number_object_detected(image_path)
|
58 |
return class_c ,result
|
59 |
|
60 |
|
61 |
+
# Per-class severity weights used when scoring overall vehicle condition:
# 'scratch' weighs 1, every other damage class weighs 2.
severity_points = {'scratch': 1, 'dent': 2, 'rust': 2, 'paint-damage': 2, 'crack': 2}
|
68 |
+
|
69 |
+
def calculate_condition_score(detections):
    """Return the total severity score for a set of detections.

    Args:
        detections: mapping of damage-class name -> detection count.

    Returns:
        Sum of ``severity_points[name] * count`` over every detection whose
        class name has a severity weight; unknown class names are ignored
        (matching the original loop's ``if detection in severity_points``).
    """
    # Idiomatic replacement for the manual accumulator loop: a single
    # sum() over a generator expression with the same membership filter.
    return sum(
        severity_points[name] * count
        for name, count in detections.items()
        if name in severity_points
    )
|
75 |
+
|
76 |
+
def normalize_score(score, max_score):
    """Express *score* on a 0-10 scale relative to *max_score*."""
    fraction = score / max_score
    return fraction * 10
|
78 |
+
|
79 |
|
80 |
+
## this function will take the image url and call all the related functions
|
81 |
+
def estimate_condition(detections):
    """Map detected damage counts to a qualitative condition label.

    Args:
        detections: mapping of damage-class name -> detection count.

    Returns:
        One of "Excellent", "Good", "Fair", "Poor", "Very Poor".

    The raw severity score is normalized against the score produced when
    each known damage type appears exactly once.
    NOTE(review): because counts can exceed 1, the normalized score can
    exceed 10 — the upper bands (>10) are reachable; confirm this is the
    intended scale.
    """
    print("Detection list", detections)  # fixed typo: was "Detedtion list"
    max_possible_score = sum(severity_points.values())  # assumes one of each damage type
    score = calculate_condition_score(detections)
    normalized_score = normalize_score(score, max_possible_score)

    if normalized_score <= 2:
        print("Condition Excellent")
        return "Excellent"
    elif normalized_score <= 7:
        print("Condition Good")
        return "Good"
    elif normalized_score <= 15:
        # BUGFIX: original used `> 7 and < 15` here and `> 15` for the next
        # band, so a score of exactly 15 fell through to "Very Poor".
        # Bands are now contiguous and right-closed, like the others.
        print("Condition Fair")
        return "Fair"
    elif normalized_score <= 20:
        print("Condition Poor")
        return "Poor"
    else:
        print("Condition Very Poor")
        return "Very Poor"
|
102 |
+
|
103 |
+
|
104 |
|
105 |
|
106 |
## loading the model
|
107 |
+
def process_data(files):
    """Gradio handler: run damage detection over every uploaded image.

    Args:
        files: gallery value — a sequence whose items are tuples with the
            image file path at index 0.

    Returns:
        Tuple ``(condition_label, annotated_images)`` where the images are
        RGB arrays of the model's plotted predictions.
    """
    print(files)
    paths = [item[0] for item in files]
    annotated_images = []
    print('fileName', paths)

    total_damage = {}

    for path in paths:
        print('image is ', path)
        damage, result = car_detection_and_Cropping(path)

        # Collect the model's annotated plots, flipping BGR -> RGB.
        for prediction in result:
            bgr = prediction.plot(pil=True)
            annotated_images.append(bgr[..., ::-1])

        # Accumulate per-class damage counts across all images.
        for cls, cnt in damage.items():
            total_damage[cls] = total_damage.get(cls, 0) + cnt

    condition = estimate_condition(total_damage)
    return (condition, annotated_images)
|
131 |
+
|
132 |
+
# Wire the detection pipeline into a simple Gradio UI and start serving.
interface = gr.Interface(
    fn=process_data,
    inputs=gr.Gallery(label='Upload Image of Car', type='filepath'),
    outputs=[
        gr.Textbox(label="Number of Objects detected "),
        gr.Gallery(label='output', type='pil'),
    ],
    title=" 🚘Car Scratch and Dent Detection",
)
interface.launch()
|