# from PIL import Image
# import matplotlib.pyplot as plt


# # 使用pillow库读取图片
# img = Image.open(r"C:\Programs\workspace\deep_learning\data\g\mmexport1586341597617.jpg")
# fig = plt.figure(figsize=(6, 6))
# ax1 = fig.add_subplot(111)
# ax1.imshow(img)
# ax1.add_patch(
#         plt.Rectangle(
#             (100, 200),  # (x,y)矩形左下角
#             300,  # width长
#             400,  # height宽
#             color='red', 
#             # alpha=0.5,
#             linewidth=2,
#             fill=False
#         )
#     )

# # # 使用matplotlib自带image库读取图片
# # img = mpimg.imread(r"./jupyter/matplotlib/images/1.jpg")  
# # ax2 = fig.add_subplot(122)
# # ax2.imshow(img)

# plt.show()

import random
import os
from PIL import Image

import torch
import torchvision
import matplotlib.pyplot as plt
# from d2l import torch as d2l
from torch.nn import functional as F
import numpy as np

from ssd import TinySSD
from hardware import try_gpu
from utils import multibox_detection
from dataset import train_transforms


def predict(X):
    """Run the detector on one preprocessed image batch and keep hits.

    X: image tensor of shape (1, C, H, W) — assumed to match the model's
    training preprocessing. Inference runs on CPU. Returns only the
    detection rows whose predicted class id is not -1 (non-background).
    """
    net.eval()
    anchors, cls_preds, bbox_preds = net(X.to('cpu'))
    # Softmax over classes, then move the class axis in front of anchors.
    cls_probs = F.softmax(cls_preds, dim=2).permute(0, 2, 1)
    detections = multibox_detection(cls_probs, bbox_preds, anchors)
    keep = [i for i, det in enumerate(detections[0]) if det[0] != -1]
    return detections[0, keep]


# Defined in file: ./chapter_computer-vision/bounding-box.md
def bbox_to_rect(bbox, color):
    """Convert a corner-format bounding box to a matplotlib patch.

    `bbox` is (upper-left x, upper-left y, lower-right x, lower-right y);
    matplotlib wants an anchor point plus width and height.
    """
    x1, y1, x2, y2 = bbox[0], bbox[1], bbox[2], bbox[3]
    return plt.Rectangle(xy=(x1, y1), width=x2 - x1, height=y2 - y1,
                         fill=False, edgecolor=color, linewidth=2)


numpy = lambda x, *args, **kwargs: x.detach().numpy(*args, **kwargs)


# Defined in file: ./chapter_computer-vision/anchor.md
def show_bboxes(axes, bboxes, labels=None, colors=None):
    """Draw bounding boxes, with optional text labels, on a matplotlib axes."""

    def as_list(value, fallback=None):
        # Normalize a scalar argument to a list; None picks the fallback.
        if value is None:
            return fallback
        if isinstance(value, (list, tuple)):
            return value
        return [value]

    labels = as_list(labels)
    colors = as_list(colors, ['b', 'g', 'r', 'm', 'c'])
    for idx, bbox in enumerate(bboxes):
        color = colors[idx % len(colors)]
        patch = bbox_to_rect(numpy(bbox), color)
        axes.add_patch(patch)
        if labels and len(labels) > idx:
            # Black text on white boxes, white text on everything else.
            text_color = 'k' if color == 'w' else 'w'
            axes.text(patch.xy[0], patch.xy[1], labels[idx], va='center',
                      ha='center', fontsize=9, color=text_color,
                      bbox=dict(facecolor=color, lw=0))


# d2l.show_bboxes
def display(img, output, threshold):
    # d2l.set_figsize((5, 5))
    fig = plt.figure(figsize=(6, 6))
    ax1 = fig.add_subplot(111)
    ax1.imshow(img)

    # fig = d2l.plt.imshow(img)
    for row in output:
        row = row.detach()
        score = float(row[1])
        if score < threshold:
            # print(score)
            continue
        # print(score)
        h, w = img.shape[0:2]
        bboxs = [row[2:6] * torch.tensor((w, h, w, h), device=row.device)]
        # d2l.show_bboxes(fig.axes, bbox, '%.2f' % score, 'w')

        # show_bboxes(axes=ax1, bboxes=bboxs, labels='%.2f' % score, colors='w')
        show_bboxes(axes=ax1, bboxes=bboxs, labels='%.2f' % score, colors=['w'])
        # show_bboxes(axes=ax1, bboxes=bboxs, labels=['ban'], colors=['red'])

    plt.show()


if __name__ == '__main__':
    # Load the trained TinySSD checkpoint (single foreground class).
    net = TinySSD(num_classes=1)
    net.load_state_dict(torch.load('TinySSD2021-08-04-00_36_22.pth'))
    # net.load_state_dict(torch.load(r'C:\Programs\workspace\deep_learning\open\d2l-zh\pytorch\chapter_computer-vision\TinySSD.pth'))

    # img_base_path = r'C:\Programs\workspace\deep_learning\open\d2l-zh\pytorch\data\banana-detection\bananas_train\images'
    # img_path = os.path.join(img_base_path, os.listdir(img_base_path)[random.randint(0, len(os.listdir(img_base_path)))])
    # # X = torchvision.io.read_image('../img/banana.jpg').unsqueeze(0).float()

    img_path = r'C:\Programs\workspace\deep_learning\data\banana-detection\bananas_train\images\1.png'
    # NOTE(review): this line overwrites the path above, so only
    # 'test3.jpg' is ever used — remove one of the two assignments.
    img_path = r'test3.jpg'

    X = Image.open(img_path)
    # Apply the same preprocessing transforms as were used at training time.
    for t in train_transforms:
        X = t(X)

    X = X.unsqueeze(0)  # .float()
    # X = torchvision.io.read_image(
    #     img_path
    # ).unsqueeze(0).float()

    # X = torchvision.io.read_image(r'C:\Programs\workspace\deep_learning\open\d2l-zh\pytorch\data\banana-detection\bananas_train\images\20_Family_Group_Family_Group_20_585.jpg').unsqueeze(0).float()

    # Re-read the original image for display (raw, unnormalized pixels).
    # img = X.squeeze(0).permute(1, 2, 0).long()
    img = np.array(Image.open(img_path))
    # img = X.squeeze(0)
    # img = img.permute(1, 2, 0)  # .long()

    # NOTE(review): `device` is assigned but never used — predict() moves
    # the input to CPU unconditionally.
    # device = try_gpu()
    device = 'cpu'

    # X = X / 255
    output = predict(X)

    # X = torch.tensor(
    #     Image.open(r'C:\Programs\workspace\deep_learning\data\banana-detection\bananas_train\images\1.png')
    #     ).unsqueeze(0).float()
    # img = X.squeeze(0).permute(1, 2, 0).long()

    # Draw detections scoring at least 0.6 over the image.
    display(img, output.cpu(), threshold=0.6)
