"""
    Yolo test helper

    author: wxz
    date: 
    github: https://github.com/xinzwang
"""
import argparse
import json
import os
import sys
from pathlib import Path
from threading import Thread

import numpy as np
import torch
from tqdm import tqdm

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
# Make the local yolov5 packages (models/, utils/) importable no matter where
# this script is launched from. NOTE(review): `sys` was imported but unused and
# the project imports below depend on ROOT being on the path — restoring the
# upstream sys.path insertion that was dropped here.
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative path (for tidy printing)

from models.common import DetectMultiBackend
from utils.callbacks import Callbacks
from utils.datasets import create_dataloader
from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, check_yaml,
                           coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args,
                           scale_coords, xywh2xyxy, xyxy2xywh)
from utils.metrics import ConfusionMatrix, ap_per_class, box_iou
from utils.plots import output_to_target, plot_images, plot_val_study
from utils.torch_utils import select_device, time_sync


def process_batch(detections, labels, iouv):
    """
    Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format.
    Arguments:
        detections (Array[N, 6]), x1, y1, x2, y2, conf, class
        labels (Array[M, 5]), class, x1, y1, x2, y2
    Returns:
        correct (Array[N, 10]), bool, one column per IoU level in iouv
    """
    n_det, n_thr = detections.shape[0], iouv.shape[0]
    correct = torch.zeros(n_det, n_thr, dtype=torch.bool, device=iouv.device)
    iou_mat = box_iou(labels[:, 1:], detections[:, :4])
    # Candidate pairs: IoU above the lowest threshold AND identical class.
    lbl_idx, det_idx = torch.where((iou_mat >= iouv[0]) & (labels[:, 0:1] == detections[:, 5]))
    if lbl_idx.shape[0]:
        # Columns: [label index, detection index, IoU]
        pairs = torch.cat((torch.stack((lbl_idx, det_idx), 1),
                           iou_mat[lbl_idx, det_idx][:, None]), 1).cpu().numpy()
        if lbl_idx.shape[0] > 1:
            # Sort by IoU descending, then keep the best-IoU candidate for each
            # detection, and after that at most one detection per label
            # (np.unique keeps the first occurrence, i.e. the highest IoU).
            pairs = pairs[pairs[:, 2].argsort()[::-1]]
            pairs = pairs[np.unique(pairs[:, 1], return_index=True)[1]]
            pairs = pairs[np.unique(pairs[:, 0], return_index=True)[1]]
        pairs = torch.Tensor(pairs).to(iouv.device)
        correct[pairs[:, 1].long()] = pairs[:, 2:3] >= iouv
    return correct


class Yolov5Helper(object):
    """Thin wrapper around a YOLOv5 DetectMultiBackend model.

    Loads the model once at construction time and exposes ``forward`` as a
    hook for computing a loss over a batch (not implemented yet).
    """

    def __init__(self,
                 weights='yolov5/weights/yolov5s.pt',  # model.pt path(s)
                 batch_size=32,  # batch size
                 imgsz=640,  # inference size (pixels)
                 conf_thres=0.001,  # confidence threshold
                 iou_thres=0.65,  # NMS IoU threshold
                 half=False,  # use FP16 half-precision inference
                 ):
        # Inference parameters
        self.weights = weights
        self.batch_size = batch_size
        self.imgsz = imgsz
        self.half = half
        self.conf_thres = conf_thres
        self.iou_thres = iou_thres

        # Set device (CUDA if available, otherwise CPU)
        self.device = select_device(device='', batch_size=self.batch_size)

        # Load model via the multi-backend loader (PyTorch / TorchScript / TensorRT)
        self.model = DetectMultiBackend(self.weights, device=self.device, dnn=False)
        self.stride, self.pt, self.jit, self.engine = self.model.stride, self.model.pt, self.model.jit, self.model.engine
        self.imgsz = check_img_size(imgsz, s=self.stride)  # check image size is a stride multiple

        # FP16 only for PyTorch/TorchScript/TensorRT backends on a non-CPU device.
        # Fixed: compare self.device.type (a str) against 'cpu' — the original
        # compared the torch.device object itself to the string (and carried a
        # stray dangling '' literal), which does not reliably gate out CPU.
        self.half &= (self.pt or self.jit or self.engine) and self.device.type != 'cpu'
        if self.pt or self.jit:
            self.model.model.half() if self.half else self.model.model.float()

    def forward(self, batch_img, batch_label):
        """Compute the loss for a batch of images and labels.

        Not implemented yet — always returns None.
        """
        loss = None
        # TODO-wxz: compute the loss from the batch, e.g. run
        #   out, train_out = self.model(im)
        # per image and accumulate the loss terms.
        return loss
