import logging
from pathlib import Path
import random

import cv2
import torch

from cjc.detector.detector import Detector
from cjc.tools.logger import Logger
from yolo.utils.general import non_max_suppression, scale_coords, xyxy2xywh
from yolo.utils.plots import plot_one_box


class DetectorForTrack(Detector):
    """Detector variant that feeds per-frame detections into a SORT-style
    tracker through caller-supplied callbacks, instead of drawing or saving
    results itself.
    """

    def __init__(self, _opt, update_sort_ages_cb, update_camera_cb, update_current_frame_cb, pass_detections_to_sort_cb,
                 sort_swap_frames_cb, nothing_detected_cb):
        """Store the tracking callbacks and per-class plot colors.

        Args:
            _opt: detector options, forwarded unchanged to the ``Detector`` base.
            update_sort_ages_cb: called as ``cb(i)`` when frame ``i`` has no
                detections, so the tracker can age out its tracks.
            update_camera_cb: called as ``cb(i)`` for camera-motion compensation.
            update_current_frame_cb: called as ``cb(i, im0)`` with the raw frame.
            pass_detections_to_sort_cb: called as
                ``cb(i, xywhs, confs, clss, im0)`` with the rescaled detections.
            sort_swap_frames_cb: called as ``cb(i, frame)`` after every frame,
                detections or not.
            nothing_detected_cb: called as ``cb(i)`` when nothing was detected.
        """
        super().__init__(_opt)  # zero-arg super(): file is Python-3-only (uses implicit __class__ below)
        self.update_camera_cb = update_camera_cb
        self.update_sort_ages_cb = update_sort_ages_cb
        self.update_current_frame_cb = update_current_frame_cb
        self.pass_detections_to_sort_cb = pass_detections_to_sort_cb
        self.sort_swap_frames_cb = sort_swap_frames_cb
        self.nothing_detected_cb = nothing_detected_cb
        # One random BGR color per class name, available to any box-plotting consumer.
        self.colors = [[random.randint(0, 255) for _ in range(3)] for _ in self.names]
        self.logger = Logger(__class__.__name__)

    def get_names(self):
        """Return the class-name list inherited from the base ``Detector``."""
        return self.names

    def _process_detection(self, path, img, im0s, vid_cap, pred):
        """Dispatch one batch of model predictions to the tracking callbacks.

        Args:
            path: source path(s); a list when reading webcam streams
                (unused here; kept for interface compatibility).
            img: the preprocessed tensor the model ran on — only its spatial
                shape (``img.shape[2:]``) is used, to rescale boxes back to
                the original image size.
            im0s: original image(s): a list of frames when ``self.webcam`` is
                true, a single frame otherwise.
            vid_cap: video-capture handle (unused here; kept for interface
                compatibility with the base ``Detector``).
            pred: per-image detection tensors with columns
                ``[x1, y1, x2, y2, conf, cls]``.
        """
        for i, det in enumerate(pred):  # one detection tensor per image
            # Webcam mode batches multiple streams; copy so downstream
            # callbacks cannot mutate the shared source frame.
            im0 = im0s[i].copy() if self.webcam else im0s
            self.update_current_frame_cb(i, im0)  # hand the raw frame to the tracker
            self.update_camera_cb(i)  # camera-motion compensation hook

            if len(det):
                # Rescale boxes from the model input size back to im0 size.
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()

                xywhs = xyxy2xywh(det[:, 0:4])
                confs = det[:, 4]
                clss = det[:, 5]
                self.pass_detections_to_sort_cb(i, xywhs, confs, clss, im0)
            else:
                # Nothing detected in this frame: age tracks and notify.
                self.update_sort_ages_cb(i)
                self.nothing_detected_cb(i)

            # Always swap frames afterwards, with the per-stream frame in
            # webcam mode and the single frame otherwise (original, not copy).
            if self.webcam:
                self.sort_swap_frames_cb(i, im0s[i])
            else:
                self.sort_swap_frames_cb(i, im0s)