import cv2
import numpy as np
from sklearn.neighbors import KNeighborsClassifier

# KNN-based background subtractor: models per-pixel history to separate
# moving foreground from the static background.
back_sub = cv2.createBackgroundSubtractorKNN(history=500, dist2Threshold=400, detectShadows=True)


def get_centroid(x, y, w, h):
    """Return the center of a bounding box as integer pixel coordinates."""
    return (int(x + w / 2), int(y + h / 2))


# Capture frames from the default webcam.
cap = cv2.VideoCapture(0)

# Classifier that is periodically re-fitted on the features collected so far.
knn = KNeighborsClassifier(n_neighbors=3)

# Accumulated training data: one feature vector (w, h, cx, cy) per detection.
object_features = []
object_labels = []

learning_interval = 30  # re-fit the classifier every 30 frames
frame_count = 0

while True:
    ret, frame = cap.read()
    if not ret:
        break

    # Foreground mask from the background subtractor.
    fg_mask = back_sub.apply(frame)

    # Clean up the mask: opening removes small noise, dilation fills gaps.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    fg_mask = cv2.morphologyEx(fg_mask, cv2.MORPH_OPEN, kernel, iterations=1)
    fg_mask = cv2.dilate(fg_mask, kernel, iterations=1)

    contours, _ = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # Remember the most recent detection so it can be labelled after re-fitting.
    last_detection = None
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > 100:  # ignore tiny blobs
            x, y, w, h = cv2.boundingRect(cnt)
            centroid = get_centroid(x, y, w, h)

            # Simple feature vector: box size and centroid position.
            features = [w, h, centroid[0], centroid[1]]
            object_features.append(features)
            object_labels.append(1)
            last_detection = (x, y, features)

            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.circle(frame, centroid, 4, (0, 0, 255), -1)

    frame_count += 1

    # Periodically re-fit the classifier on everything collected so far,
    # then label the most recent detection with the updated model.
    if frame_count % learning_interval == 0 and len(object_features) > 5:
        knn.fit(object_features, object_labels)
        print("Model updated!")

        if last_detection is not None:
            x, y, features = last_detection
            predicted_label = knn.predict([features])[0]
            cv2.putText(frame, f"Predicted: {predicted_label}", (x, y - 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

    cv2.imshow('Optimized Object Tracking', frame)

    # Exit when the Esc key (code 27) is pressed.
    if cv2.waitKey(1) & 0xFF == 27:
        break

cap.release()
cv2.destroyAllWindows()