from ssd import build_ssd
from data import *
import torch
import cv2
import numpy as np
from torch.autograd import Variable
from matplotlib import pyplot as plt
from xml.dom.minidom import parse
import os.path as osp
import os
from collections import Counter

# Dataset root for the held-out test split; images are expected under JPEGImages/.
root_path = '/home/ubuntu/code/ssd.pytorch/data/pollen_data/test'
images_path = osp.join(root_path, 'JPEGImages')

# load net
if torch.cuda.is_available():
    # Make newly created tensors default to CUDA float tensors.
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
num_classes = len(VOC_CLASSES) + 1  # +1 background
# NOTE(review): num_classes is computed above but build_ssd is called with a
# hard-coded 2 (background + one foreground class) — confirm the two agree.
net = build_ssd('test', 300, 2)  # initialize SSD
net.load_weights('./weights/ssd300_COCO_15000.pth')
# NOTE(review): unconditional move to CUDA — this raises when no GPU is
# present, despite the torch.cuda.is_available() guard above; verify intent.
net.to(device='cuda')


def predict(image_path, save=True):
    """Run SSD detection on one image and plot boxes scoring >= 0.6.

    Args:
        image_path: path to an image file readable by cv2.
        save: when True, also write the annotated figure to
            ./predict/<basename of image_path>.
    """
    image = cv2.imread(image_path)
    rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Preprocess: resize to the SSD input size, subtract the BGR channel
    # means, convert BGR -> RGB, then HWC -> CHW for the network.
    x = cv2.resize(image, (300, 300)).astype(np.float32)
    x -= (104.0, 117.0, 123.0)
    x = x[:, :, ::-1].copy()
    x = torch.from_numpy(x).permute(2, 0, 1)

    with torch.no_grad():
        xx = x.unsqueeze(0)  # add batch dimension (Variable wrapper is obsolete)
        if torch.cuda.is_available():
            xx = xx.cuda()

        y = net(xx)

        from data import VOC_CLASSES as labels

        # Single figure per call; the earlier version opened a second,
        # never-closed figure here, leaking one figure per image.
        plt.figure(figsize=(10, 10))
        colors = plt.cm.hsv(np.linspace(0, 1, 2)).tolist()
        plt.imshow(rgb_image)  # plot the image for matplotlib
        currentAxis = plt.gca()

        detections = y.data
        # Scale factors to map normalized box coords back to pixel coords
        # (width, height, width, height).
        scale = torch.tensor(rgb_image.shape[1::-1], dtype=torch.float32).repeat(2)

        for i in range(detections.size(1)):
            if i == 0:
                continue  # class index 0 is the background class
            j = 0
            # Detections are score-sorted per class; stop once below the
            # confidence threshold, and bound j so we never index past the
            # per-class top-k slots when every slot clears the threshold.
            while j < detections.size(2) and detections[0, i, j, 0] >= 0.6:
                score = detections[0, i, j, 0]
                label_name = labels[i - 1]
                display_txt = '%s: %.2f' % (label_name, score)
                pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
                coords = (pt[0], pt[1]), pt[2] - pt[0] + 1, pt[3] - pt[1] + 1
                color = colors[i]
                currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))
                currentAxis.text(pt[0], pt[1], display_txt, bbox={'facecolor': color, 'alpha': 0.5})
                j += 1
        if save:
            plt.savefig(f'./predict/{image_path.split("/")[-1]}')
        plt.show()
        plt.close()


def accuracy_recall():
    """Evaluate precision and recall of the detector over the test split.

    Every detection scoring >= 0.6 counts as a positive prediction; a
    prediction is a hit when it overlaps a same-labelled ground-truth box
    with IoU > 0.4.

    Returns:
        (precision, recall) as floats; 0.0 is returned for a ratio whose
        denominator is zero (no predictions / empty test set).

    NOTE(review): a single prediction matching several ground-truth boxes
    increments the hit count once per match, so precision can exceed 1 —
    this mirrors the original counting logic and is deliberately unchanged.
    """
    ann_path = osp.join(root_path, 'Annotations')
    hit_num = 0
    predict_pos_num = 0
    names = []  # every ground-truth label in the set (recall denominator)
    for fname in os.listdir(images_path):
        image_path = osp.join(images_path, fname)
        image = cv2.imread(image_path)

        # Preprocess: resize, subtract means, BGR -> RGB, HWC -> CHW.
        x = cv2.resize(image, (300, 300)).astype(np.float32)
        x -= MEANS
        x = x[:, :, ::-1].copy()
        x = torch.from_numpy(x).permute(2, 0, 1)

        # Parse the matching Pascal-VOC style annotation file.
        image_name = fname.replace('.jpg', '')
        DOMTree = parse(osp.join(ann_path, image_name + '.xml'))
        collection = DOMTree.documentElement
        gt = []
        labels = []
        for obj in collection.getElementsByTagName("object"):
            name = obj.getElementsByTagName('name')[0].childNodes[0].data
            names.append(name)
            labels.append(name)
            gt.append(
                [int(obj.getElementsByTagName('xmin')[0].childNodes[0].data),
                 int(obj.getElementsByTagName('ymin')[0].childNodes[0].data),
                 int(obj.getElementsByTagName('xmax')[0].childNodes[0].data),
                 int(obj.getElementsByTagName('ymax')[0].childNodes[0].data)]
            )
        # Normalize gt boxes to [0, 1] so they compare with network output.
        scale = torch.tensor(image.shape[1::-1], dtype=torch.float32).repeat(2)
        gt = torch.Tensor(gt) / scale

        with torch.no_grad():
            xx = x.unsqueeze(0)  # add batch dimension (Variable wrapper is obsolete)
            if torch.cuda.is_available():
                xx = xx.cuda()
            y = net(xx)
            detections = y.data
            for cls in range(1, detections.size(1)):  # class 0 is background
                j = 0
                # Score-sorted detections: stop below the threshold, and
                # bound j so we never index past the per-class top-k slots.
                while j < detections.size(2) and detections[0, cls, j, 0] >= 0.6:
                    predict_pos_num += 1
                    # NOTE(review): this indexes the per-image gt label list
                    # by class id; it works only because the model has a
                    # single foreground class — likely a class-name table
                    # (VOC_CLASSES) was intended. Kept as-is; confirm.
                    label_name = labels[cls - 1]
                    pt = detections[0, cls, j, 1:].expand_as(gt)
                    # IoU of this predicted box against every gt box; if it
                    # clears 0.4 against a same-labelled box, count a hit.
                    min_xy = torch.max(gt[:, :2], pt[:, :2])
                    max_xy = torch.min(gt[:, 2:], pt[:, 2:])
                    overlap_hw = (max_xy - min_xy).clamp(min=0)
                    overlap_area = overlap_hw[:, 0] * overlap_hw[:, 1]
                    a_hw = gt[:, 2:] - gt[:, :2]
                    b_hw = pt[:, 2:] - pt[:, :2]
                    a_area = a_hw[:, 0] * a_hw[:, 1]
                    b_area = b_hw[:, 0] * b_hw[:, 1]
                    iou = overlap_area / (a_area + b_area - overlap_area)
                    for index, gt_label in enumerate(labels):
                        if iou[index].item() > 0.4 and label_name == gt_label:
                            hit_num += 1
                    j += 1
    # Total ground-truth positives (the Counter round-trip summed to this).
    pos_num = len(names)
    precision = hit_num / predict_pos_num if predict_pos_num else 0.0
    recall = hit_num / pos_num if pos_num else 0.0
    return precision, recall


if __name__ == '__main__':
    # Report precision ("准确率") and recall ("召回率") over the test split.
    precision, recall_value = accuracy_recall()
    print('准确率:%s，召回率:%s' % (precision, recall_value))

    # for i in os.listdir(images_path):
    #     image_path = osp.join(images_path, i)
    #     print(image_path)
    #     predict(image_path, False)

# if torch.cuda.is_available():
#     torch.set_default_tensor_type('torch.cuda.FloatTensor')
# def xavier(param):
#     torch.nn.init.xavier_uniform_(param)
#
#
# def weights_init(m):
#     if isinstance(m, torch.nn.Conv2d):
#         xavier(m.weight.data)
#         m.bias.data.zero_()
#
# criterion = MultiBoxLoss(2, 0.5, True, 0, True, 3, 0.5,
#                              False, True)
# dt = VOCDetection(root='/home/ubuntu/code/ssd.pytorch/data/pollen_data/train',
#                   transform=SSDAugmentation(voc['min_dim'], MEANS))
# data_loader = torch.utils.data.DataLoader(dt, num_workers=2,
#                                           batch_size=16,
#                                           shuffle=True, collate_fn=detection_collate,
#                                           pin_memory=True)
#
# ssd_net = build_ssd('train', 300, 2)
# net = ssd_net
# vgg_weights = torch.load('/home/ubuntu/code/ssd.pytorch/weights/vgg16_reducedfc.pth')
# print('Loading base network...')
# ssd_net.vgg.load_state_dict(vgg_weights)
# ssd_net.extras.apply(weights_init)
# ssd_net.loc.apply(weights_init)
# ssd_net.conf.apply(weights_init)
# print(len(data_loader))
# ssd_net.to('cuda')
# for aaa in range(6):
#     for i in data_loader:
#         images, targets = i
#         output = ssd_net(images.cuda())
#         loss_l, loss_c = criterion(output, [j.cuda() for j in targets])
#         print(loss_l,loss_c)
