"""
model name : MY_YOLO
file       : detect.py
information:
    author : OuYang
    time   : 2025/1/23
"""
import torch
import torchvision
from PIL import Image
from torchvision import transforms
from utils.utils import xywh2xyxy

from utils.show import draw_boxes_from_PIL, show_image

from model import YOLO


class YOLODetector:
    """Run a trained YOLO model on PIL images.

    Pipeline: letterbox-pad each image to a square, resize to the network
    input size, run a CPU forward pass, then decode the grid predictions
    into pixel-space boxes and apply per-image NMS.
    """

    def __init__(
            self,
            model_path,
            classes_name,
            backbone='resnet34',
            imgsz=448,
            scale_threshold=0.1,
            iou_threshold=0.5,
            num_classes=20,
            conf_threshold=0.8,
    ):
        """
        Args:
            model_path: path to a state-dict checkpoint loadable by torch.load.
            classes_name: list of class labels; index must match training ids.
            backbone: backbone name forwarded to the YOLO constructor.
            imgsz: side length (pixels) of the square network input.
            scale_threshold: minimum conf * class-score for a candidate box.
            iou_threshold: IoU threshold passed to torchvision NMS.
            num_classes: number of classes the checkpoint was trained with.
            conf_threshold: objectness above which a box is kept regardless of
                scale_threshold (previously a hard-coded 0.8).
        """
        self.model_path = model_path
        self.imgsz = imgsz
        self.backbone = backbone
        self.classes_name = classes_name
        self.scale_threshold = scale_threshold
        self.iou_threshold = iou_threshold
        self.num_classes = num_classes
        self.conf_threshold = conf_threshold

        # Inference runs on CPU only.
        self.device = torch.device("cpu")

        self.model = YOLO(num_classes=self.num_classes, backbone=self.backbone).to(self.device)
        # weights_only=True refuses arbitrary pickled objects in the checkpoint.
        self.model.load_state_dict(torch.load(model_path, weights_only=True))
        self.model.eval()

    @staticmethod
    def _square_pad(image):
        """Build a Pad transform that centers `image` on a square zero canvas.

        Padding the shorter side (instead of stretching) preserves the aspect
        ratio through the later Resize. Odd size differences lose one pixel to
        integer division, matching the original behavior.
        """
        w, h = image.size
        if h > w:
            pad_w = (h - w) // 2
            return transforms.Pad((pad_w, 0, pad_w, 0), fill=0, padding_mode='constant')
        if w > h:
            pad_h = (w - h) // 2
            return transforms.Pad((0, pad_h, 0, pad_h), fill=0, padding_mode='constant')
        return transforms.Pad((0, 0, 0, 0), fill=0, padding_mode='constant')

    def preprocess(self, images):
        """Convert PIL images into a normalized (N, 3, imgsz, imgsz) batch tensor."""
        batch_image = []
        for image in images:
            transform = transforms.Compose([
                self._square_pad(image),
                transforms.Resize((self.imgsz, self.imgsz)),
                transforms.ToTensor(),
                # ImageNet mean/std, matching the pretrained backbone.
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ])
            batch_image.append(transform(image))
        return torch.stack(batch_image)

    def preprocess_PIL(self, images):
        """Pad/resize the images exactly like `preprocess`, but keep them as
        PIL images so detections can be drawn on them."""
        resize = transforms.Resize((self.imgsz, self.imgsz))
        return [resize(self._square_pad(image)(image)) for image in images]

    def postprocess(self, predictions, images):
        """Decode raw grid predictions into pixel boxes and run NMS per image.

        Args:
            predictions: (N, S, S, B*5 + C) tensor; per cell, B boxes of
                (x, y, w, h, conf) followed by C class scores.
            images: preprocessed PIL images (used only for drawing).

        Returns:
            (boxes, scores): two lists with one entry per input image — a
            tensor of kept (x1, y1, x2, y2) pixel boxes and the matching
            confidence tensor; empty (0, 4) / (0,) tensors when nothing is
            detected. (Bug fix: the original overwrote the result on every
            batch iteration, returning only the last image's detections.)
        """
        batch_size = predictions.size(0)
        s = predictions.size(1)
        b = 2  # boxes predicted per grid cell
        cell_size = 1.0 / s

        result_boxes = []
        result_scores = []
        for batch_idx in range(batch_size):
            boxes = []
            classes_id = []
            scores = []
            for i in range(s):
                for j in range(s):
                    cell = predictions[batch_idx][i][j]
                    # The class prediction is shared by all B boxes of a cell,
                    # so compute it once outside the bbox loop.
                    cls_id = cell[b * 5:].argmax()
                    cls_score = cell[b * 5 + cls_id]
                    for bbox_idx in range(b):
                        conf = cell[bbox_idx * 5 + 4]
                        # Keep a box when it is confidently an object OR its
                        # combined conf * class score clears the threshold.
                        if conf < self.conf_threshold and conf * cls_score < self.scale_threshold:
                            continue

                        x, y, w, h = cell[bbox_idx * 5: bbox_idx * 5 + 4]
                        # (x, y) are offsets within the cell; convert to
                        # image-relative coordinates, then to pixels.
                        # NOTE(review): i indexes the first grid axis but is
                        # mapped to x here — confirm against the training
                        # target encoding.
                        x = (x + i) * cell_size * self.imgsz
                        y = (y + j) * cell_size * self.imgsz
                        w = w * self.imgsz
                        h = h * self.imgsz

                        x1, y1, x2, y2 = xywh2xyxy((x, y, w, h))
                        boxes.append([x1, y1, x2, y2])
                        classes_id.append(cls_id)
                        scores.append(cls_score * conf)

            if len(boxes) == 0:
                # Keep the output aligned with the input batch.
                result_boxes.append(torch.zeros((0, 4)))
                result_scores.append(torch.zeros((0,)))
                continue

            boxes = torch.tensor(boxes)
            scores = torch.tensor(scores)
            classes_id = torch.tensor(classes_id)
            keep = torchvision.ops.nms(boxes, scores, self.iou_threshold)
            kept_boxes = boxes[keep]
            kept_scores = scores[keep]
            # Bug fix: class ids must be filtered by the NMS keep-indices too,
            # otherwise the drawn labels do not match the surviving boxes.
            kept_classes = classes_id[keep]

            print(f"{'*' * 20} Image {batch_idx} {'*' * 20}")
            print(kept_boxes.tolist())
            print(kept_scores.tolist())
            print(kept_classes.tolist())
            print([self.classes_name[idx] for idx in kept_classes])

            draw_boxes_from_PIL(
                images[batch_idx],
                boxes=kept_boxes.tolist(),
                scares=kept_scores.tolist(),
                texts=[self.classes_name[idx] for idx in kept_classes],
                show=True
            )

            result_boxes.append(kept_boxes)
            result_scores.append(kept_scores)

        return result_boxes, result_scores

    def detect(self, images):
        """Run the full pipeline on a list of PIL images; returns the
        per-image (boxes, scores) lists from `postprocess`."""
        inputs = self.preprocess(images)
        images = self.preprocess_PIL(images)
        with torch.no_grad():
            outputs = self.model(inputs)
        boxes, scores = self.postprocess(outputs, images)
        return boxes, scores


if __name__ == '__main__':
    import os

    # Class labels for the BITVehicle dataset; order must match the
    # class ids the checkpoint was trained with.
    classes_name = [
        'Bus',
        'Microbus',
        'Minivan',
        'Sedan',
        'SUV',
        'Truck'
    ]
    yolo = YOLODetector(
        model_path='./model_20_car.pth',
        classes_name=classes_name,
        backbone="resnet50",
        scale_threshold=0.3,
        iou_threshold=0.3,
        num_classes=6
    )

    images_path = 'D:/space/datasets/BITVehicle/valid/images'
    for image_name in os.listdir(images_path):
        image_path = os.path.join(images_path, image_name)
        # `with` closes the underlying file handle (the original leaked one
        # per image); convert("RGB") materializes the pixel data first.
        with Image.open(image_path) as img:
            image = img.convert('RGB')
        yolo.detect([image])
