"""
Usage -:
    $ python test.py 
"""
import argparse
import os
import platform
import sys
from pathlib import Path
import torch
import numpy as np

from models.common import DetectMultiBackend
from utils.general import (LOGGER, Profile, check_file, check_img_size,  colorstr, cv2,
                           increment_path, non_max_suppression, print_args, scale_boxes, xyxy2xywh, letterbox)
from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import smart_inference_mode


import io  
IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm'  # include image suffixes
print("start")
# Set Param
weights='sim.pt'
visualize=False
hide_labels=False
hide_conf=False
save_dir='output'

# --- Load the detection model on CPU ---
model = DetectMultiBackend(weights, device='cpu', dnn=False, data='sim.yaml', fp16=False)
stride = model.stride
names = model.names
pt = model.pt
# Verify that 640x640 is compatible with the model stride.
imgsz = check_img_size((640, 640), s=stride)
count = 0  # index used to name the annotated output image

im0 = cv2.imread("1.png")  
im = letterbox(im0, (640, 640), stride=stride, auto=True)[0]  # padded resize
im = im.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
im = np.ascontiguousarray(im)  # contiguous
# Run inference
model.warmup(imgsz=(1, 3, *imgsz))  # warmup
im = torch.from_numpy(im).to(model.device)
im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
im /= 255  # 0 - 255 to 0.0 - 1.0
if len(im.shape) == 3:
    im = im[None]  # expand for batch dim
# --- Inference ---
# Bug fix: the original computed `save_dir / Path(path).stem`, but `save_dir`
# is a plain str (no `/` operator) and `path` was never defined, so enabling
# `visualize` crashed with a NameError. Build the path from Path(save_dir)
# and the actual input image ("1.png", loaded above) instead.
if visualize:
    visualize = increment_path(Path(save_dir) / Path("1.png").stem, mkdir=True)
pred = model(im, augment=False, visualize=visualize)
# NMS: conf threshold 0.25, IoU 0.45, all classes, class-agnostic off, max 100 boxes
pred = non_max_suppression(pred, 0.25, 0.45, None, False, max_det=100)
# --- Parse detections, draw boxes, and save the annotated image ---
bbox_list = []
for det in pred:  # one detection tensor per image in the batch
    annotator = Annotator(im0, line_width=3, example=str(names))
    if len(det):
        # Map box coordinates from the letterboxed input back onto the original image.
        det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
        gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # [w, h, w, h] normalization gains
        for *xyxy, conf, cls in reversed(det):
            class_id = int(cls)
            # Normalized center-x, center-y, width, height.
            xywh_norm = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()
            bbox_list.append({'id': class_id, 'xywh': xywh_norm})
            print("class id:")
            print(class_id)
            print("xywh:")
            print(xywh_norm)
            label = None if hide_labels else (names[class_id] if hide_conf else f'{names[class_id]} {conf:.2f}')
            annotator.box_label(xyxy, label, color=colors(class_id, True))
cv2.imwrite(str(count) + '.png', im0)

