import argparse
import os
import platform
import sys
from pathlib import Path
import numpy as np

import torch

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from models.common import DetectMultiBackend
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
                           increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh)
from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import select_device, smart_inference_mode
from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste,
                                 letterbox, mixup, random_perspective)


# --- Inference configuration -------------------------------------------------
# NOTE(review): the absolute Windows paths below are machine-specific; consider
# making them configurable (CLI args / env vars) before running elsewhere.
weights=r'D:\PYPRJ\yolov5-7.0\game_plug_in\model\best2.pt'  # trained model weights (.pt)
data=r'D:\PYPRJ\yolov5-7.0\data\mydata-1.yaml'  # dataset yaml (provides class names)
imgsz = [640,640]  # inference size [height, width]
file_img = r'p2.png'  # sample image path -- unused in this chunk; presumably used by a caller
conf_thres=0.25  # NMS confidence threshold
iou_thres=0.45  # NMS IoU threshold

def load_model(weights_path=weights, data_path=data, device_str='', half=False):
    """Load a YOLOv5 DetectMultiBackend model and warm it up.

    Backward-compatible generalization: calling load_model() with no arguments
    behaves exactly as before (module-level `weights`/`data`, auto device, fp32).

    Args:
        weights_path: path to the .pt weights file (default: module-level `weights`).
        data_path: path to the dataset yaml with class names (default: module-level `data`).
        device_str: device selector passed to select_device ('' = auto-pick).
        half: load the model in fp16 (default False, matching the original behavior).

    Returns:
        A warmed-up DetectMultiBackend model ready for inference.
    """
    device = select_device(device_str)
    model = DetectMultiBackend(weights_path, device=device, dnn=False,
                               data=data_path, fp16=half)
    # Dummy forward pass so the first real inference is not slowed by lazy init.
    model.warmup(imgsz=(1, 3, *imgsz))
    return model


def interface_img(img, model, conf=conf_thres, iou=iou_thres):
    """Run YOLOv5 detection on a single BGR image and return normalized boxes.

    Backward-compatible generalization: the NMS thresholds default to the
    module-level conf_thres / iou_thres, so existing 2-argument calls behave
    exactly as before.

    Args:
        img: HWC BGR image as a numpy array (e.g. from cv2.imread).
        model: a DetectMultiBackend instance, e.g. from load_model().
        conf: NMS confidence threshold (default: module-level conf_thres).
        iou: NMS IoU threshold (default: module-level iou_thres).

    Returns:
        A list of tuples (class_name, x_center, y_center, width, height, conf_pct)
        where the xywh values are normalized to [0, 1] relative to the original
        image and conf_pct is the confidence as an integer percentage.
    """
    stride, names = model.stride, model.names

    # Preprocess: letterbox resize, HWC -> CHW, BGR -> RGB, contiguous memory.
    im = letterbox(img, imgsz[0], stride=stride, auto=True)[0]
    im = im.transpose((2, 0, 1))[::-1]
    im = np.ascontiguousarray(im)

    im = torch.from_numpy(im).to(model.device)
    im = im.half() if model.fp16 else im.float()  # uint8 -> fp16/32
    im /= 255  # 0-255 -> 0.0-1.0
    if len(im.shape) == 3:
        im = im[None]  # add batch dimension

    # Inference + NMS (classes=None, agnostic=False, as in the original call).
    pred = model(im, augment=False, visualize=False)
    pred = non_max_suppression(pred, conf, iou, None, False, max_det=1000)

    box_list = []
    for det in pred:  # per image (batch size is 1 here)
        if not len(det):
            continue
        # Rescale boxes from the letterboxed size back to the original image.
        det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], img.shape).round()
        gn = torch.tensor(img.shape)[[1, 0, 1, 0]]  # whwh normalization gain
        for *xyxy, conf_score, cls in reversed(det):
            # torch.stack avoids the slow-copy warning that torch.tensor()
            # emits when given a list of 0-dim tensors.
            xywh = (xyxy2xywh(torch.stack(xyxy).view(1, 4)) / gn).view(-1).tolist()
            box_list.append((names[int(cls)], *xywh, int(100 * float(conf_score))))
    return box_list






"""
img_h, img_w, _ = img.shape
for _box in box_list:
    x1 = int(_box[1] * img_w - _box[3] * img_w / 2)
    y1 = int(_box[2] * img_h - _box[4] * img_h / 2)
    x2 = int(x1 + _box[3] * img_w)
    y2 = int(y1 + _box[4] * img_h)
    cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 3)
    cv2.putText(img, _box[0], (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0))
cv2.imshow('detect_obj', img)
cv2.waitKey(0)
"""





















