import cv2
from torchvision.transforms import v2 as T
import torch

# sys.setrecursionlimit(1200)
def set_transformer():
    """Build the default evaluation transform pipeline.

    Converts the input to a tensor image and rescales pixel values
    to float32 in [0, 1].
    """
    return T.Compose([
        T.ToImage(),
        T.ToDtype(torch.float32, scale=True),
    ])


def crop(img, half_height=300, half_width=400):
    """Return a crop centered on the middle of *img*.

    Parameters
    ----------
    img : array-like of shape (H, W, C)
        Image in HWC layout (e.g. an OpenCV BGR frame).
    half_height, half_width : int
        Half the crop extent along each axis; the defaults reproduce
        the original fixed 600x800 center crop.

    Notes
    -----
    Slice starts are clamped at 0: for images smaller than the crop
    window, a negative start index would wrap around from the end and
    yield a tiny or empty crop instead of the (clipped) center region.
    """
    height, width = img.shape[:2]
    mid_row = height // 2
    mid_col = width // 2

    top = max(0, mid_row - half_height)
    left = max(0, mid_col - half_width)
    return img[top : mid_row + half_height, left : mid_col + half_width]


class EvalImage:
    """Wrap a transform callable so calls return a one-item list."""

    def __init__(self, transformer) -> None:
        # Callable applied to every image passed to __call__.
        self.transformer = transformer

    def __call__(self, img):
        transformed = self.transformer(img)
        return [transformed]


class Detection:
    """Run a detection model over frames read from an OpenCV capture source.

    Parameters
    ----------
    model : callable
        Detection model; called with a list of tensors and expected to
        return a list of dicts containing a 'boxes' entry
        (torchvision detection-model style).
    transformer : callable
        Per-frame preprocessing transform.
    path : int or str
        Source passed to ``cv2.VideoCapture`` (0 = default camera).
    device : str
        Torch device the input tensors are moved to before inference.

    Raises
    ------
    RuntimeError
        If the capture source cannot be opened.
    """

    def __init__(self, model, transformer, path=0, device="cpu") -> None:
        self.model = model
        self.transformer = transformer

        self.evalImage = EvalImage(transformer)

        self.sources = cv2.VideoCapture(path)

        if not self.sources.isOpened():
            # Raising a plain string is a TypeError in Python 3; raise a
            # real exception instance so callers can actually catch it.
            raise RuntimeError("打开失败,请检查路径")

        self.device = device

    def forward(self, x=None):
        """Read frames, run the model, and display detection boxes until
        the stream ends or the user presses 'q'.
        """
        try:
            flag, frame = self.sources.read()
            # ~30 fps display pacing for cv2.waitKey.
            delay_ms = int((1 / 30) * 1000)

            while flag:
                # cv2.waitKey returns an int key code; the original compared
                # it to the string 'q', which is always False, so the quit
                # key never worked. Mask to 8 bits and compare to ord('q').
                if cv2.waitKey(delay_ms) & 0xFF == ord('q'):
                    break

                f0 = crop(frame)
                frame1 = self.format_data(f0)

                with torch.no_grad():
                    frame1 = [im.to(device=self.device) for im in frame1]
                    predictions = self.model(frame1)

                # Draw each predicted box (red, BGR (0, 0, 255)) on the crop.
                for box in predictions[0]['boxes']:
                    f0 = cv2.rectangle(
                        f0,
                        (int(box[0]), int(box[1])),
                        (int(box[2]), int(box[3])),
                        (0, 0, 255),
                    )

                cv2.imshow("frame", f0)

                flag, frame = self.sources.read()
        finally:
            # Free the camera / file handle even if inference raised.
            self.sources.release()

    def format_data(self, img):
        """Apply the evaluation transform; returns a list of tensors."""
        return self.evalImage(img)
    



class VideoDetection(Detection):
    """Detection runner pre-configured with the default transform pipeline."""

    def __init__(self, model, transformer=None, path=0, device="cpu") -> None:
        # Build the default transform lazily: calling set_transformer() in
        # the parameter default ran it once at import time and shared the
        # resulting pipeline across all instances.
        if transformer is None:
            transformer = set_transformer()
        # path default now matches Detection's (0 = default camera); the
        # previous default of None is not a valid cv2.VideoCapture argument.
        super().__init__(model, transformer, path, device)

    def __call__(self):
        """Process the stream; convenience alias for forward()."""
        self.forward()


