#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Time        : 2021/12/22 15:16
# Author      : caoxu
# Version     : python 3.8
# Description : oid_yolo detect

from __future__ import division

import os

# Must be set before torch/matplotlib load their MKL/OpenMP runtimes.
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'

import argparse
import random

import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision.transforms as transforms
import tqdm
from matplotlib.ticker import NullLocator
from PIL import Image
from torch.autograd import Variable
from torch.utils.data import DataLoader

# NOTE(review): load_model assumed to live in pytorch_yolo.models, mirroring the
# upstream PyTorch-YOLOv3 layout — confirm the module path in this package.
from yolov3_detector.pytorch_yolo.models import load_model
from yolov3_detector.pytorch_yolo.utils.datasets import ImageFolder
from yolov3_detector.pytorch_yolo.utils.transforms import Resize, DEFAULT_TRANSFORMS
from yolov3_detector.pytorch_yolo.utils.utils import load_classes, rescale_boxes, non_max_suppression, print_environment_info


def detect_image(model, image_path, classes_names, output_path, img_size=416, conf_thres=0.5, nms_thres=0.5):
    """Run YOLO inference on a single image, save crops and an annotated copy.

    :param model: loaded YOLO network (torch.nn.Module).
    :param image_path: path of the image to run detection on.
    :param classes_names: list of class-name strings indexed by class id.
    :param output_path: directory where result images are written.
    :param img_size: square size the image is resized to for the network.
    :param conf_thres: objectness confidence threshold.
    :param nms_thres: IOU threshold for non-maximum suppression.
    :return: numpy array of detections ``[[x1, y1, x2, y2, conf, cls], ...]``
             in original-image pixel coordinates.
    """
    # Deterministic inference: disable dropout, use BatchNorm running stats.
    model.eval()

    # Configure input
    image = np.array(Image.open(image_path).convert("RGB"))
    # DEFAULT_TRANSFORMS operates on (image, boxes) pairs; pass dummy boxes
    # and keep only the transformed image tensor.
    input_img = transforms.Compose([
        DEFAULT_TRANSFORMS,
        Resize(img_size)])(
            (image, np.zeros((1, 5))))[0].unsqueeze(0)

    if torch.cuda.is_available():
        input_img = input_img.to("cuda")

    # Get detections
    with torch.no_grad():
        detections = model(input_img)
        detections = non_max_suppression(detections, conf_thres, nms_thres)
        # Rescale boxes from the img_size network space to original image space.
        detections = rescale_boxes(detections[0], img_size, image.shape[:2])
    get_obj_image(image_path, detections, output_path)
    _save_result_image(image_path, detections, img_size, output_path, classes_names)
    # .cpu() is a no-op on CPU tensors but prevents a crash when the
    # detections tensor is still on the GPU.
    return detections.cpu().numpy()


def get_obj_image(image_path, detections, output_path):
    """Crop every detected bounding box out of the source image.

    Each crop is written as ``object_<i>.png`` inside *output_path*.

    :param image_path: path of the original image.
    :param detections: iterable of (x1, y1, x2, y2, conf, cls_pred) rows.
    :param output_path: directory the crops are saved to.
    """
    # Context manager releases the file handle even if a save fails
    # (the original leaked it).
    with Image.open(image_path) as img:
        for i, (x1, y1, x2, y2, conf, cls_pred) in enumerate(detections):
            crop_img = img.crop((float(x1), float(y1), float(x2), float(y2)))
            # os.path.join fixes mangled names produced by the old string
            # concat when output_path had no trailing separator
            # (e.g. "outputobject_0.png").
            crop_img.save(os.path.join(output_path, 'object_' + str(i) + '.png'))

def _save_result_image(image_path, detections, img_size, output_path, classes):
    """Draw the detections on the original image and save it as ``<stem>.png``.

    :param image_path: path of the original image.
    :param detections: tensor of (x1, y1, x2, y2, conf, cls_pred) rows,
        already rescaled to original-image coordinates by the caller.
    :param img_size: network input size (kept for interface compatibility;
        unused because boxes arrive pre-rescaled).
    :param output_path: directory the annotated image is written to.
    :param classes: list of class names indexed by class id.
    """
    # Create plot. Single figure only — the original's extra plt.figure()
    # call leaked one unused figure per invocation.
    img = np.array(Image.open(image_path))
    fig, ax = plt.subplots(1)
    ax.imshow(img)
    # One distinct colour per predicted class.
    unique_labels = detections[:, -1].cpu().unique()
    n_cls_preds = len(unique_labels)
    cmap = plt.get_cmap("tab20b")
    colors = [cmap(i) for i in np.linspace(0, 1, n_cls_preds)]
    bbox_colors = random.sample(colors, n_cls_preds)
    for x1, y1, x2, y2, conf, cls_pred in detections:

        print(f"\t+ Label: {classes[int(cls_pred)]} | Confidence: {conf.item():0.4f}")

        box_w = x2 - x1
        box_h = y2 - y1

        color = bbox_colors[int(np.where(unique_labels == int(cls_pred))[0])]
        # Create a Rectangle patch for the bounding box
        bbox = patches.Rectangle((x1, y1), box_w, box_h, linewidth=2, edgecolor=color, facecolor="none")
        # Add the bbox to the plot
        ax.add_patch(bbox)
        # Add the class-name label at the top-left corner of the box
        plt.text(
            x1,
            y1,
            s=classes[int(cls_pred)],
            color="white",
            verticalalignment="top",
            bbox={"color": color, "pad": 0})

    # Strip axes and ticks so only the image itself is saved
    plt.axis("off")
    plt.gca().xaxis.set_major_locator(NullLocator())
    plt.gca().yaxis.set_major_locator(NullLocator())
    # BUG FIX: the f-string previously hard-coded "(unknown)" instead of
    # interpolating the image stem, so every image overwrote the same file.
    # splitext also handles dotted filenames that split(".")[0] truncated.
    filename = os.path.splitext(os.path.basename(image_path))[0]
    output_path = os.path.join(output_path, f"{filename}.png")
    plt.savefig(output_path, bbox_inches="tight", pad_inches=0.0)
    plt.close()


def run():
    """CLI entry point: parse arguments, load the model, detect one image."""
    print_environment_info()
    parser = argparse.ArgumentParser(description="Detect objects on images.")
    parser.add_argument("-m", "--model", type=str, default="config/yolov3.cfg", help="Path to model definition file (.cfg)")
    parser.add_argument("-w", "--weights", type=str, default="weights/yolov3.weights", help="Path to weights or checkpoint file (.weights or .pth)")
    parser.add_argument("-i", "--images", type=str, default="data/samples", help="Path to directory with images to inference")
    parser.add_argument("-c", "--classes", type=str, default="data/coco.names", help="Path to classes label file (.names)")
    parser.add_argument("-o", "--output", type=str, default="output", help="Path to output directory")
    parser.add_argument("-b", "--batch_size", type=int, default=1, help="Size of each image batch")
    parser.add_argument("--img_size", type=int, default=416, help="Size of each image dimension for yolo")
    parser.add_argument("--n_cpu", type=int, default=8, help="Number of cpu threads to use during batch generation")
    parser.add_argument("--conf_thres", type=float, default=0.5, help="Object confidence threshold")
    parser.add_argument("--nms_thres", type=float, default=0.4, help="IOU threshold for non-maximum suppression")
    args = parser.parse_args()
    print(f"Command line arguments: {args}")

    # Extract class names from file
    classes = load_classes(args.classes)  # List of class names

    # BUG FIX: detect_image() expects a loaded model as its first argument,
    # but the old call passed the cfg path, the weights path, and five
    # positionals total — img_size was then supplied both positionally
    # (args.output) and by keyword, raising TypeError before any inference.
    model = load_model(args.model, args.weights)

    # Detect a single image
    detect_image(
        model,
        args.images,
        classes,
        args.output,
        img_size=args.img_size,
        conf_thres=args.conf_thres,
        nms_thres=args.nms_thres)


# Script entry point: run detection only when executed directly.
if __name__ == "__main__":
    run()
