import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
import numpy as np
import time
import cv2

# Run everything on CPU (no CUDA assumed).
device = 'cpu'

WIDTH = 600              # target width (px) after rescaling the input image
INPUT_SIZE = (224, 224)  # input size expected by the CNN (ResNet-50)

# ImageNet-pretrained ResNet-50 used as the region classifier.
model = models.resnet50(pretrained=True).to(device)

from PIL import Image

filename = './bike.jpg'
orig = Image.open(filename)
# Rescale proportionally so the image width becomes WIDTH pixels.
orig = orig.resize((WIDTH, int(orig.size[1] / orig.size[0] * WIDTH)))
# Height/width ratio of the resized image.
# NOTE(review): appears unused in the rest of this script — confirm before removing.
Width_Height_ratio = orig.size[1] / orig.size[0]
# orig.size
# orig.size

# Preprocessing pipeline for the CNN: resize to the model's input size,
# convert to a tensor, then normalize with the ImageNet channel statistics.
_preprocess_steps = [
    transforms.Resize(INPUT_SIZE),
    transforms.ToTensor(),
    transforms.Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225],
    ),
]
transform = transforms.Compose(_preprocess_steps)

# PIL格式转换为OpenCV格式
# Convert a PIL-style RGB image to an OpenCV-style BGR ndarray.
def PIL2CV2(orig):
    """Return a contiguous BGR ndarray copy of the RGB image ``orig``.

    ``np.array`` already produces an independent copy of the pixel data,
    so the extra PIL-side ``orig.copy()`` the original version made was
    redundant and has been dropped.
    """
    rgb = np.array(orig)
    # Reverse the channel axis (RGB -> BGR); .copy() makes the result
    # contiguous so OpenCV drawing functions can write into it.
    return rgb[:, :, ::-1].copy()


# Preview grid for the Selective Search region proposals.
import matplotlib.pyplot as plt

plt.figure(figsize=(16, 16))


def Selective_Search(img_path):
    """Run OpenCV Selective Search on an image and return candidate regions.

    Parameters
    ----------
    img_path : str
        Path to the image file to load.

    Returns
    -------
    (torch.Tensor, list[tuple[int, int, int, int]])
        A batch of transformed crops with shape (N, 3, 224, 224) moved to
        ``device``, and the matching (x1, y1, x2, y2) boxes expressed in
        resized-image coordinates.
    """
    img = cv2.imread(img_path)
    # Scale to WIDTH pixels wide, keeping this image's own aspect ratio.
    # (Previously the target height was derived from the global `orig`,
    # which silently coupled this function to script state; computing it
    # from the loaded image gives the same result for the same file and
    # works for any other path as well.)
    h0, w0 = img.shape[:2]
    img = cv2.resize(img, (WIDTH, int(h0 / w0 * WIDTH)),
                     interpolation=cv2.INTER_AREA)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # Run Selective Search in fast mode.
    cv2.setUseOptimized(True)
    cv2.setNumThreads(8)
    gs = cv2.ximgproc.segmentation.createSelectiveSearchSegmentation()
    gs.setBaseImage(img)
    gs.switchToSelectiveSearchFast()
    rects = gs.process()

    roi_list = []  # transformed crops, concatenated once at the end
    locs = []      # (x1, y1, x2, y2) box for each crop
    shown = 0      # number of crops previewed so far (capped at 100)
    for (x, y, w, h) in rects:
        # Filter out implausible box sizes for the target object.
        if w < 100 or w > 400 or h < 100:
            continue

        # Crop and convert to PIL for the torchvision transform.
        crop_img = Image.fromarray(img[y:y + h, x:x + w])
        if shown < 100:
            shown += 1
            plt.subplot(10, 10, shown)
            plt.imshow(crop_img)

        # unsqueeze adds the batch dimension: (1, 3, 224, 224)
        roi_list.append(transform(crop_img).unsqueeze(0))
        locs.append((x, y, x + w, y + h))

    # Single torch.cat instead of one per iteration (O(n) vs O(n^2) copies).
    rois = torch.cat(roi_list, dim=0) if roi_list else torch.empty(0)
    return rois.to(device), locs


# Generate region proposals and their preprocessed crops for the test image.
rois, locs = Selective_Search(filename)
plt.tight_layout()

# Load the ImageNet class-name list (one label per line).
with open("imagenet_classes.txt", "r") as f:
    categories = [s.strip() for s in f]

# Classify every candidate region in a single batch.
model.eval()
with torch.no_grad():
    output = model(rois)

# Convert logits to per-class probabilities.
probabilities = torch.nn.functional.softmax(output, dim=1)

# Top-1 probability and class id for each region.
# (The original's bare `probabilities` expression — a notebook display
# leftover with no effect in a script — has been removed.)
top_prob, top_catid = torch.topk(probabilities, 1)

MIN_CONFIDENCE = 0.4  # minimum classification probability to keep a region

# Group qualifying boxes by predicted class label: {label: [(box, prob), ...]}.
labels = {}
for i, (prob, imagenetID) in enumerate(zip(top_prob.numpy().reshape(-1),
                                           top_catid.numpy().reshape(-1))):
    label = categories[imagenetID]

    # Keep only confident detections...
    if prob >= MIN_CONFIDENCE:
        # ...of the bicycle class (ImageNet class id 671).
        if imagenetID != 671:
            continue
        box = locs[i]
        print(i, imagenetID)
        # setdefault replaces the original get/append/store sequence.
        labels.setdefault(label, []).append((box, prob))
# (The original's trailing bare `labels.keys()` — a no-op notebook
# display leftover — has been removed.)


def non_max_suppression_slow(boxes, overlapThresh=0.5):
    """Suppress heavily-overlapping boxes and return the survivors.

    Boxes are processed in order of their bottom y-coordinate; each kept
    box removes every remaining candidate whose intersection with it,
    measured relative to the candidate's own area, exceeds
    ``overlapThresh``.

    Parameters
    ----------
    boxes : np.ndarray
        Array of shape (N, 4) holding (x1, y1, x2, y2) rows.
    overlapThresh : float
        Overlap ratio above which a candidate is suppressed.

    Returns
    -------
    np.ndarray or list
        The surviving rows of ``boxes`` (an empty list when N == 0).
    """
    if len(boxes) == 0:
        return []

    # Split out the corner coordinates of every candidate window.
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]

    # Pixel area of each candidate (inclusive coordinates, hence the +1).
    area = (x2 - x1 + 1) * (y2 - y1 + 1)

    # Process candidates ordered by their bottom edge.
    order = np.argsort(y2)
    keep = []
    while len(order) > 0:
        # Take the candidate with the largest remaining bottom-y.
        last_pos = len(order) - 1
        current = order[last_pos]
        keep.append(current)
        drop = [last_pos]

        # Measure how much each remaining candidate overlaps the kept box.
        for pos, other in enumerate(order[:last_pos]):
            iw = max(0, min(x2[current], x2[other]) - max(x1[current], x1[other]) + 1)
            ih = max(0, min(y2[current], y2[other]) - max(y1[current], y1[other]) + 1)

            # Overlap is relative to the candidate's own area.
            if float(iw * ih) / area[other] > overlapThresh:
                drop.append(pos)

        # Discard the kept box and everything it suppressed, then repeat.
        order = np.delete(order, drop)

    return boxes[keep]


# Draw detection results for each detected class.
for label in labels.keys():
    # if label != categories[671]: continue # bike

    # Work on a copy of the (resized) original image.
    open_cv_image = PIL2CV2(orig)

    # Draw every raw candidate box for this class.
    for (box, prob) in labels[label]:
        (startX, startY, endX, endY) = box
        cv2.rectangle(open_cv_image, (startX, startY), (endX, endY),
                      (0, 255, 0), 2)

    # Show the boxes before non-maximum suppression.
    cv2.imshow("Before NMS", open_cv_image)

    # Apply NMS on a fresh copy of the image.
    open_cv_image2 = PIL2CV2(orig)
    boxes = np.array([p[0] for p in labels[label]])
    # NOTE(review): proba is built but never used below — confirm before removing.
    proba = np.array([p[1] for p in labels[label]])
    # NOTE(review): MIN_CONFIDENCE (0.4) is passed as the *overlap* threshold
    # here, not a probability threshold — looks like a mix-up; verify intent.
    boxes = non_max_suppression_slow(boxes, MIN_CONFIDENCE)  # non max suppression

    # Cycle through a small palette so adjacent boxes are distinguishable.
    color_list = [(0, 255, 0), (255, 0, 0), (255, 255, 0), (0, 0, 0), (0, 255, 255)]
    for i, x in enumerate(boxes):
        # startX, startY, endX, endY, label = x.numpy()
        startX, startY, endX, endY = x  # .numpy()
        # Draw the surviving box and its class label.
        cv2.rectangle(open_cv_image2, (int(startX), int(startY)), (int(endX), int(endY))
                      , color_list[i % len(color_list)], 2)
        # Put the label just above the box, or inside it when near the top edge.
        startY = startY - 15 if startY - 15 > 0 else startY + 15
        cv2.putText(open_cv_image2, str(label), (int(startX), int(startY)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)

    # Show the boxes after NMS; wait for a key press before the next class.
    cv2.imshow("After NMS", open_cv_image2)
    cv2.waitKey(0)

cv2.destroyAllWindows()