"""
model name : MY_YOLO
file       : model.py
information:
    author : OuYang
    time   : 2025/1/22
"""
import math

import torch
import torchvision
from torch import nn

from utils.utils import calculate_padding, xywh2xyxy
from utils.show import draw_boxes_from_PIL

import warnings

warnings.filterwarnings("ignore", category=UserWarning)


class YOLO(nn.Module):
    def __init__(
            self,
            num_classes=20,
            in_channels=3,
            backbone='resnet34',
            s=7,
            b=2,
            pretrained=True
    ):
        super(YOLO, self).__init__()
        self.num_classes = num_classes
        self.backbone = backbone
        self.in_channels = in_channels
        self.s = s
        self.b = b
        self.pretrained = pretrained

        # Backbone
        self.backbone, out_features = self._build_backbone(backbone)
        for param in self.backbone.parameters():
            param.requires_grad = False

        # Yolo Back 4 layer

        self.conv = nn.Sequential(
            nn.Conv2d(
                in_channels=out_features,
                out_channels=1024,
                kernel_size=3,
                padding=1
            ),
            nn.BatchNorm2d(1024),
            nn.LeakyReLU(),
            nn.Conv2d(1024, 1024, 3, 2, 1),
            nn.BatchNorm2d(1024),
            nn.LeakyReLU(),
            nn.Conv2d(1024, 1024, 3, 1, 1),
            nn.BatchNorm2d(1024),
            nn.LeakyReLU()
        )
        self.conn = nn.Sequential(
            nn.Linear(in_features=7 * 7 * 1024, out_features=4096),
            nn.LeakyReLU(),
            nn.BatchNorm1d(4096),
            nn.Linear(in_features=4096, out_features=self.s * self.s * (self.b * 5 + self.num_classes)),
            nn.Sigmoid()
        )

    def forward(self, x):
        x = self.backbone(x)
        x = self.conv(x)
        x = x.view(x.size(0), -1)
        x = self.conn(x)
        x = torch.reshape(x, (-1, self.s, self.s, self.b * 5 + self.num_classes))
        return x

    def _build_backbone(self, backbone_name):
        ret_backbone = None
        ret_out_features = None
        if backbone_name == 'resnet18':
            ret_backbone = torchvision.models.resnet18(pretrained=self.pretrained)
            ret_out_features = ret_backbone.fc.in_features
        elif backbone_name == 'resnet34':
            ret_backbone = torchvision.models.resnet34(pretrained=self.pretrained)
            ret_out_features = ret_backbone.fc.in_features
        elif backbone_name == 'resnet50':
            ret_backbone = torchvision.models.resnet50(pretrained=self.pretrained)
            ret_out_features = ret_backbone.fc.in_features
        elif backbone_name == 'resnet101':
            ret_backbone = torchvision.models.resnet101(pretrained=self.pretrained)
            ret_out_features = ret_backbone.fc.in_features
        elif backbone_name == 'resnet152':
            ret_backbone = torchvision.models.resnet152(pretrained=self.pretrained)
            ret_out_features = ret_backbone.fc.in_features

        # Delete Back 2 layer
        ret_backbone = nn.Sequential(
            *list(ret_backbone.children())[:-2]
        )

        return ret_backbone, ret_out_features


class YOLODetector:
    """End-to-end inference wrapper: preprocess PIL images, run a YOLO model,
    decode + NMS the grid predictions, and draw the resulting boxes.
    """

    def __init__(
            self,
            weights,
            num_classes,
            classes_name,
            in_channels=3,
            device=torch.device('cpu'),
            imgsz=448,
            confidence_threshold=0.5,
            scale_threshold=0.1,
            iou_threshold=0.5,
            model=None,
            backbone='resnet50',
            s=7,
            b=2,
            show=False,
    ):
        """
        Args:
            weights: path to a state-dict checkpoint for the model.
            num_classes: number of object classes the model predicts.
            classes_name: sequence mapping class id -> display name.
            in_channels: input image channels (forwarded to YOLO).
            device: torch device for the model and input batches.
            imgsz: square side length images are padded/resized to.
            confidence_threshold: minimum raw box confidence to keep a box.
            scale_threshold: minimum (confidence * class score) to keep a box.
            iou_threshold: IoU threshold used by torchvision NMS.
            model: optional pre-built model; if None one is built from
                `backbone`/`s`/`b`.
            backbone: torchvision ResNet variant for the built model.
            s: YOLO output grid size.
            b: boxes predicted per grid cell.
            show: forwarded to the drawing helper to display images.
        """
        self.weights = weights
        self.num_classes = num_classes
        self.classes_name = classes_name
        self.in_channels = in_channels
        self.imgsz = imgsz
        self.confidence_threshold = confidence_threshold
        self.scale_threshold = scale_threshold
        self.iou_threshold = iou_threshold
        self.model = model
        self.backbone = backbone
        self.s = s
        self.b = b
        self.show = show

        # Device
        self.device = device

        # Create model only when the caller did not supply one.
        if self.model is None:
            self.model = YOLO(
                num_classes=self.num_classes,
                in_channels=self.in_channels,
                backbone=self.backbone,
                s=self.s,
                b=self.b
            ).to(self.device)

        # Load weights (weights_only=True avoids arbitrary pickle execution).
        self.model.load_state_dict(torch.load(self.weights, weights_only=True))

        self.model.eval()

    def _batch_images(self, images, to_tensor=True):
        """Pad each PIL image to square and resize to (imgsz, imgsz).

        Args:
            images: iterable of PIL images.
            to_tensor: if True, also convert to tensors and apply ImageNet
                mean/std normalization; if False, return PIL images.

        Returns:
            List of transformed images (tensors or PIL, per ``to_tensor``).
        """
        batch_image = []
        for i, image in enumerate(images):
            shape = image.size

            # Pad to square so the aspect ratio survives the resize.
            padding = calculate_padding(shape)
            pad = torchvision.transforms.Pad(padding=padding, fill=0, padding_mode='constant')

            transform_list = [
                pad,
                torchvision.transforms.Resize((self.imgsz, self.imgsz))
            ]

            if to_tensor:
                transform_list.append(torchvision.transforms.ToTensor())
                transform_list.append(torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]))

            transform = torchvision.transforms.Compose(transform_list)
            image = transform(image)
            batch_image.append(image)

        return batch_image

    def preprocess_to_tensor(self, images):
        """Return a stacked (N, C, imgsz, imgsz) tensor ready for the model."""
        batch_image = self._batch_images(images, to_tensor=True)
        return torch.stack(batch_image)

    def preprocess_PIL(self, images):
        """Return the padded/resized PIL images (used as drawing canvases)."""
        return self._batch_images(images, to_tensor=False)

    def postprocess(self, predictions, images):
        """Decode the YOLO grid, filter, NMS, and draw boxes on the images.

        Args:
            predictions: model output of shape (N, s, s, b * 5 + num_classes).
            images: the matching preprocessed PIL images.

        Returns:
            List of drawn images. NOTE: images with zero surviving boxes are
            skipped, so the result may be shorter than ``images``.
        """
        batch_size = predictions.size(0)
        s = predictions.size(1)
        b = self.b
        cell_size = 1.0 / s

        result_images = []
        for batch_idx in range(batch_size):
            boxes = []
            classes_id = []
            scores = []

            for i in range(s):
                for j in range(s):
                    for bbox_idx in range(b):

                        # Per-box confidence; class scores are shared by the
                        # whole cell (stored after the b * 5 box values).
                        conf = predictions[batch_idx][i][j][bbox_idx * 5 + 4]
                        cls_id = predictions[batch_idx][i][j][b * 5:].argmax()
                        cls_score = predictions[batch_idx][i][j][b * 5 + cls_id]

                        # Drop a box only when BOTH the raw confidence and the
                        # scaled score are below threshold.
                        # NOTE(review): an OR here would be the stricter,
                        # more conventional filter — confirm this is intended.
                        if conf < self.confidence_threshold and conf * cls_score < self.scale_threshold:
                            continue

                        print(
                            f"i = {i} j = {j}\nclass = {self.classes_name[cls_id]} score = {cls_score:.2} conf = {conf:.2} p = {conf * cls_score:.2}"
                        )

                        # Box center is predicted relative to its cell.
                        # NOTE(review): x uses grid index i and y uses j —
                        # assumes targets were encoded with i as the
                        # horizontal axis; verify against the training code.
                        x, y, w, h = predictions[batch_idx][i][j][bbox_idx * 5: bbox_idx * 5 + 4]
                        x = x * cell_size + i * cell_size
                        y = y * cell_size + j * cell_size

                        # Scale normalized coordinates to pixels.
                        x *= self.imgsz
                        y *= self.imgsz
                        w *= self.imgsz
                        h *= self.imgsz

                        x1, y1, x2, y2 = xywh2xyxy((x, y, w, h))

                        boxes.append([x1, y1, x2, y2])
                        classes_id.append(cls_id)
                        scores.append(cls_score * conf)

            if len(boxes) == 0:
                continue

            boxes = torch.tensor(boxes)
            scores = torch.tensor(scores)
            classes_id = torch.tensor(classes_id)

            # Class-agnostic NMS; nms_result holds the indices of kept boxes.
            nms_result = torchvision.ops.nms(boxes, scores, self.iou_threshold)

            # Draw the surviving boxes. BUGFIX: labels must come from the
            # NMS-filtered ids (classes_id[nms_result]) — the previous code
            # used the unfiltered list, misaligning labels with boxes.
            image = draw_boxes_from_PIL(
                images[batch_idx],
                boxes=boxes[nms_result].tolist(),
                scares=scores[nms_result].tolist(),
                texts=[self.classes_name[int(c)] for c in classes_id[nms_result]],
                show=self.show
            )
            result_images.append(image)

        return result_images

    def detect(self, images):
        """Run the full pipeline on a list of PIL images.

        Args:
            images: list of PIL images (arbitrary sizes).

        Returns:
            List of drawn result images (may be shorter than the input when
            some images have no detections — see postprocess).
        """
        inputs = self.preprocess_to_tensor(images)
        images = self.preprocess_PIL(images)

        with torch.no_grad():
            inputs = inputs.to(self.device)
            outputs = self.model(inputs)

        result_images = self.postprocess(outputs, images)

        return result_images


if __name__ == '__main__':
    # Smoke test: build the default model and push one random batch through it.
    net = YOLO()

    # List every parameter with its shape and trainability
    # (backbone weights are frozen, only the head trains).
    for param_name, weight in net.named_parameters():
        print(param_name, weight.size(), weight.requires_grad)

    dummy_batch = torch.randn(3, 3, 448, 448)
    output = net(dummy_batch)
