import os
import cv2
import time
import argparse
import numpy as np
from distutils.util import strtobool
from ultralytics import YOLO

from deep_sort import DeepSort
from util import COLORS_10, draw_bboxes


class Detector(object):
    """Detects objects with a YOLO model and tracks them with DeepSort.

    Frames are read from the source given by ``--VIDEO_PATH`` (a numeric
    string selects a webcam index), run through YOLO, and the resulting
    boxes are fed to the DeepSort tracker. Annotated frames are optionally
    shown in an OpenCV window.
    """

    def __init__(self, args):
        self.args = args
        # --use_cuda arrives as a string ("True"/"False"); parse it to a bool
        # and keep it so detect() can pick the matching inference device.
        self.use_cuda = bool(strtobool(self.args.use_cuda))

        if args.display:
            cv2.namedWindow("test", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("test", args.display_width, args.display_height)

        self.vdo = cv2.VideoCapture()  # unused; kept for backward compatibility
        # Load the YOLO model and fuse Conv+BN layers for faster inference.
        self.yolo = YOLO(args.yolo_weights)
        self.yolo.fuse()
        self.deepsort = DeepSort(args.deepsort_checkpoint, use_cuda=self.use_cuda)
        # Class names come from the loaded model itself.
        self.class_names = self.yolo.names

    def detect(self):
        """Run the detect-and-track loop until the stream is exhausted."""
        # Honor --VIDEO_PATH instead of hard-coding camera 0; a numeric
        # string (e.g. the default "0") selects a webcam index.
        source = self.args.VIDEO_PATH
        cap = cv2.VideoCapture(int(source) if source.isdigit() else source)
        # Respect the --use_cuda flag instead of always requesting 'cuda:0',
        # which crashes on CPU-only machines.
        device = "cuda:0" if self.use_cuda else "cpu"
        try:
            while True:
                ret, frame = cap.read()
                if not ret:
                    break
                ori_im = frame.copy()

                start = time.time()
                # YOLO expects RGB input; OpenCV delivers BGR.
                im = cv2.cvtColor(ori_im, cv2.COLOR_BGR2RGB)

                results = self.yolo(
                    im,
                    conf=self.args.conf_thresh,
                    iou=self.args.nms_thresh,
                    device=device,
                )

                # Collect detections as (center-x, center-y, width, height),
                # the layout DeepSort.update expects.
                bbox_xcycwh = []
                cls_conf = []
                for result in results:
                    for box in result.boxes:
                        x1, y1, x2, y2 = box.xyxy[0].tolist()
                        w = x2 - x1
                        h = y2 - y1
                        bbox_xcycwh.append([x1 + w / 2, y1 + h / 2, w, h])
                        cls_conf.append(box.conf.item())

                if bbox_xcycwh:
                    bbox_xcycwh = np.array(bbox_xcycwh)
                    cls_conf = np.array(cls_conf)

                    # Enlarge the box height (column 3) by 20% so the crops
                    # DeepSort embeds include a little extra context.
                    bbox_xcycwh[:, 3:] *= 1.2

                    outputs = self.deepsort.update(bbox_xcycwh, cls_conf, im)
                    if len(outputs) > 0:
                        bbox_xyxy = outputs[:, :4]
                        identities = outputs[:, -1]
                        ori_im = draw_bboxes(ori_im, bbox_xyxy, identities)

                end = time.time()
                # Guard against a zero elapsed time on very fast frames.
                elapsed = end - start
                fps = 1 / elapsed if elapsed > 0 else float("inf")
                print("time: {}s, fps: {}".format(elapsed, fps))

                if self.args.display:
                    cv2.imshow("test", ori_im)
                    cv2.waitKey(1)
        finally:
            # Release the capture device even if the loop raises.
            cap.release()

def parse_args(argv=None):
    """Parse command-line options for the detector.

    Args:
        argv: Optional list of argument strings; defaults to ``sys.argv[1:]``.
            Accepting an explicit list keeps the parser unit-testable and is
            backward-compatible with the original zero-argument call.

    Returns:
        argparse.Namespace with the detector configuration.
    """
    def str2bool(value):
        # argparse's ``type=bool`` is broken: bool("False") is True because
        # any non-empty string is truthy. Parse the common spellings instead.
        return str(value).strip().lower() in ("1", "true", "yes", "y", "on")

    parser = argparse.ArgumentParser()
    parser.add_argument("--VIDEO_PATH", type=str, default=str(0))
    parser.add_argument("--yolo_weights", type=str, default=r"D:\Project\Pest_Detection_Project\YOLOV11\train\RGB_and_Infrared_Person_Detection5\weights\best.pt")
    parser.add_argument("--conf_thresh", type=float, default=0.5)
    parser.add_argument("--nms_thresh", type=float, default=0.4)
    parser.add_argument("--deepsort_checkpoint", type=str, default=r"D:\Downloads\ckpt.t7")
    parser.add_argument("--max_dist", type=float, default=0.2)
    parser.add_argument("--ignore_display", dest="display", action="store_false")
    parser.add_argument("--display_width", type=int, default=640 * 2)
    parser.add_argument("--display_height", type=int, default=480 * 2)
    # Shares dest "display" with --ignore_display; str2bool fixes the
    # original type=bool bug where "--display False" still produced True.
    parser.add_argument("--display", type=str2bool, default=True)
    parser.add_argument("--save_path", type=str, default="")
    parser.add_argument("--use_cuda", type=str, default="True")
    return parser.parse_args(argv)


# Script entry point: build the detector from CLI options and start tracking.
if __name__ == "__main__":
    detector = Detector(parse_args())
    detector.detect()