from ssd import build_ssd
from data import *
import torch
import cv2
import numpy as np
from torch.autograd import Variable
from matplotlib import pyplot as plt
from xml.dom.minidom import parse
import os.path as osp
import os
from collections import Counter
from utils import Filter
import shutil
import xml.etree.ElementTree as ET

# Dataset batches seen: tongzhou20190831 tongzhou20190901 tongzhou20190902 tongzhou20190904 tongzhou20191001
# Test-set root: VOC-style layout (JPEGImages/ + Annotations/).
root_path = '/home/ubuntu/code/ssd.pytorch/data/pollen_data_multi_classes/test'
images_path = osp.join(root_path, 'JPEGImages')

# load net
if torch.cuda.is_available():
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
num_classes = len(VOC_CLASSES) + 1  # +1 background
net = build_ssd('test', 300, num_classes)  # initialize SSD (300x300 input)
net.load_weights('./weights/VOC_mclass_1110.pth')
net.to(device='cuda')

# Image filtering: HSV bounds used by image_filter() below to mask pixels.
# NOTE(review): names say "red" but the range is very broad — confirm intent.
lower_red = np.array((54, 22, 82))
upper_red = np.array((255, 250, 250))

# Pre/post-processing helper from utils; used by accuracy_recall_map().
f = Filter()


def image_filter(image: np.ndarray) -> np.ndarray:
    """Keep only the pixels of *image* (BGR) whose HSV value lies in the
    module-level [lower_red, upper_red] range; everything else is zeroed."""
    as_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    keep_mask = cv2.inRange(as_hsv, lower_red, upper_red)
    return cv2.bitwise_and(image, image, mask=keep_mask)


def predict(image_path, save=True, score_threshold=0.8, default_means=True):
    """Run the SSD net on one image and plot its detections with matplotlib.

    Args:
        image_path: path to a .jpg image; its ground-truth XML is expected
            under ``<root_path>/Annotations`` with the same basename.
        save: when True, also draw the ground-truth boxes from the XML and
            save the figure to ``./predict/multi/<image name>``.
        score_threshold: minimum confidence for a detection to be drawn.
        default_means: when True subtract MEANS from the plain image;
            otherwise brighten the image by 10% (clipped to 255) first.
    """
    image = cv2.imread(image_path)
    if image is None:
        # Unreadable/missing file — bail out, consistent with predict_new.
        return
    rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    plt.figure(figsize=(10, 10))

    if default_means:
        x = cv2.resize(image, (300, 300)).astype(np.float32)
        x -= MEANS
    else:
        # Brighten by 10% and clip before mean subtraction.
        im = image.astype(np.float32) * 1.1
        im[im > 255] = 255
        x = cv2.resize(im, (300, 300)).astype(np.float32)
        x -= MEANS
    x = x.astype(np.float32)
    x = x[:, :, ::-1].copy()  # BGR -> RGB; .copy() makes it contiguous

    x = torch.from_numpy(x).permute(2, 0, 1)  # HWC -> CHW

    with torch.no_grad():
        xx = x.unsqueeze(0)  # add batch dimension
        if torch.cuda.is_available():
            xx = xx.cuda()

        y = net(xx)

        # One extra color so colors[-1] is reserved for ground-truth boxes.
        colors = plt.cm.hsv(np.linspace(0, 1, num_classes + 1)).tolist()
        plt.imshow(rgb_image)  # plot the image for matplotlib
        currentAxis = plt.gca()

        detections = y.data
        # Scale each detection back up to image size: (w, h, w, h).
        scale = torch.tensor(rgb_image.shape[1::-1], dtype=torch.float32).repeat(2)
        for i in range(detections.size(1)):
            if i == 0:
                continue  # index 0 is the background class
            j = 0
            # Bound j so we never index past the per-class top-k slots.
            while j < detections.size(2) and detections[0, i, j, 0] >= score_threshold:
                score = detections[0, i, j, 0]
                label_name = VOC_CLASSES[i - 1]
                display_txt = '%s: %.2f' % (label_name, score)
                pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
                coords = (pt[0], pt[1]), pt[2] - pt[0] + 1, pt[3] - pt[1] + 1
                color = colors[i]
                currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))
                currentAxis.text(pt[0], pt[1], display_txt, bbox={'facecolor': color, 'alpha': 0.5})
                j += 1
        if save:
            # Draw the ground-truth boxes from the VOC-style annotation XML.
            xml_name = image_path.split('/')[-1].replace('.jpg', '.xml')
            xml_path = osp.join(root_path, 'Annotations', xml_name)
            tree = ET.parse(xml_path)
            root = tree.getroot()
            for obj in root.iter('object'):
                label_name = 'GT:' + obj.find('name').text
                bndbox = obj.find('bndbox')
                color = colors[-1]
                xmin = int(bndbox.find('xmin').text)
                ymin = int(bndbox.find('ymin').text)
                xmax = int(bndbox.find('xmax').text)
                ymax = int(bndbox.find('ymax').text)
                coords = (xmin, ymin), xmax - xmin, ymax - ymin
                currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))
                currentAxis.text(xmax, ymax, label_name, bbox={'facecolor': color, 'alpha': 0.3})
            plt.savefig(f'./predict/multi/{image_path.split("/")[-1]}')

        plt.cla()
        plt.close('all')


def predict_new(image_path, save=True, score_threshold=0.8, default_means=True, dir=None):
    """Detect objects in *image_path*; when anything is found, save the
    annotated plot and copy the source image into per-directory folders.

    Args:
        image_path: path to the image to run through the net.
        save: when True and at least one detection passes the threshold,
            save the figure and copy the original image.
        score_threshold: minimum confidence for a detection to count.
        default_means: when True subtract MEANS from the plain image;
            otherwise brighten by 10% (clipped to 255) first.
        dir: sub-directory name under the hard-coded output roots.
    """
    image = cv2.imread(image_path)
    if image is None:
        return
    rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    plt.figure(figsize=(10, 10))

    if default_means:
        x = cv2.resize(image, (300, 300)).astype(np.float32)
        x -= MEANS
    else:
        # Brighten by 10% and clip before mean subtraction.
        im = image.astype(np.float32) * 1.1
        im[im > 255] = 255
        x = cv2.resize(im, (300, 300)).astype(np.float32)
        x -= MEANS
    x = x.astype(np.float32)
    x = x[:, :, ::-1].copy()  # BGR -> RGB; .copy() makes it contiguous

    x = torch.from_numpy(x).permute(2, 0, 1)  # HWC -> CHW

    with torch.no_grad():
        xx = x.unsqueeze(0)  # add batch dimension
        if torch.cuda.is_available():
            xx = xx.cuda()

        y = net(xx)

        colors = plt.cm.hsv(np.linspace(0, 1, num_classes)).tolist()
        plt.imshow(rgb_image)  # plot the image for matplotlib
        currentAxis = plt.gca()

        detections = y.data
        # Scale each detection back up to image size: (w, h, w, h).
        scale = torch.tensor(rgb_image.shape[1::-1], dtype=torch.float32).repeat(2)
        have_targets = False
        for i in range(detections.size(1)):
            if i == 0:
                continue  # index 0 is the background class
            j = 0
            # Bound j so we never index past the per-class top-k slots.
            while j < detections.size(2) and detections[0, i, j, 0] >= score_threshold:
                score = detections[0, i, j, 0]
                label_name = VOC_CLASSES[i - 1]
                display_txt = '%s: %.2f' % (label_name, score)
                pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
                coords = (pt[0], pt[1]), pt[2] - pt[0] + 1, pt[3] - pt[1] + 1
                color = colors[i]
                currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))
                currentAxis.text(pt[0], pt[1], display_txt, bbox={'facecolor': color, 'alpha': 0.5})
                j += 1
                have_targets = True
        if save and have_targets:
            # makedirs(exist_ok=True) also creates missing parents and avoids
            # the check-then-create race of the old exists()/mkdir() pair.
            fig_dir = osp.join('/home/ubuntu/z7z8/detection/1009', dir)
            os.makedirs(fig_dir, exist_ok=True)
            plt.savefig(osp.join(fig_dir, image_path.split("/")[-1]))
            copy_dir = osp.join('/home/ubuntu/z7z8/result/1009', dir)
            os.makedirs(copy_dir, exist_ok=True)
            shutil.copy(image_path, copy_dir)
        plt.cla()
        plt.close('all')


def accuracy_recall_map(score_threshold=0.8, default_means=True):
    """Evaluate the net over the whole test set.

    Walks every image under ``images_path``, masks it with the Filter
    pipeline, runs the net, and matches detections against the VOC XML
    ground truth (IoU > 0.4 and same label counts as a hit).

    Args:
        score_threshold: minimum confidence for a detection to count.
        default_means: when True subtract MEANS from the plain image;
            otherwise brighten by 10% (clipped to 255) first.

    Returns:
        (precision, recall, mAP); (0.0, 0.0, 0.0) when there are no
        detections or no ground-truth objects (metrics undefined).
    """
    ann_path = osp.join(root_path, 'Annotations')
    hit_num = 0          # detections matched to a GT box
    predict_pos_num = 0  # total detections above score_threshold
    names = []           # one entry per GT object, for the positive count

    precision_recalls = []
    for image_file in os.listdir(images_path):
        image_path = osp.join(images_path, image_file)
        image = cv2.imread(image_path)
        # Classify by the per-channel median, then mask the image with the
        # enclosing circles of the contours the Filter returns.
        classification = f.predict(np.expand_dims(np.percentile(image, 50, axis=(0, 1)), 0))
        cnts, op = f.filter(image, classification)
        for cnt in cnts:
            (x, y), radius = cv2.minEnclosingCircle(cnt)
            center, radius = (int(x), int(y)), int(radius)  # minimum enclosing circle
            op = cv2.circle(op, center, radius + 5, 255, -1)
        image = cv2.bitwise_and(image, image, mask=op)
        if default_means:
            x = cv2.resize(image, (300, 300)).astype(np.float32)
            x -= MEANS
        else:
            im = image.astype(np.float32) * 1.1
            im[im > 255] = 255
            x = cv2.resize(im, (300, 300)).astype(np.float32)
            x -= MEANS

        x = x.astype(np.float32)
        x = x[:, :, ::-1].copy()  # BGR -> RGB; .copy() makes it contiguous
        x = torch.from_numpy(x).permute(2, 0, 1)
        image_name = image_file.replace('.jpg', '')
        DOMTree = parse(osp.join(ann_path, image_name + '.xml'))
        collection = DOMTree.documentElement
        gt = []
        labels = []
        for obj in collection.getElementsByTagName("object"):
            name = obj.getElementsByTagName('name')[0].childNodes[0].data
            names.append(name)
            labels.append(name)

            gt.append(
                [int(obj.getElementsByTagName('xmin')[0].childNodes[0].data),
                 int(obj.getElementsByTagName('ymin')[0].childNodes[0].data),
                 int(obj.getElementsByTagName('xmax')[0].childNodes[0].data),
                 int(obj.getElementsByTagName('ymax')[0].childNodes[0].data)]
            )
        scale = torch.tensor(image.shape[1::-1], dtype=torch.float32).repeat(2)
        # reshape(-1, 4) keeps an annotation with zero objects a valid
        # (0, 4) tensor instead of a shape-(0,) one that breaks broadcasting.
        gt = torch.Tensor(gt).reshape(-1, 4) / scale

        with torch.no_grad():
            xx = x.unsqueeze(0)  # add batch dimension
            if torch.cuda.is_available():
                xx = xx.cuda()
            y = net(xx)
            detections = y.data
            # cls_idx/det_idx instead of i/j: the originals shadowed the
            # outer file-loop variables.
            for cls_idx in range(detections.size(1)):
                if cls_idx == 0:
                    continue  # index 0 is the background class
                det_idx = 0
                # Bound det_idx so we never index past the per-class top-k slots.
                while (det_idx < detections.size(2)
                       and detections[0, cls_idx, det_idx, 0] >= score_threshold):
                    predict_pos_num += 1
                    label_name = VOC_CLASSES[cls_idx - 1]
                    pt = detections[0, cls_idx, det_idx, 1:].expand_as(gt)
                    # IoU against every GT box; matched label -> hit_num += 1
                    min_xy = torch.max(gt[:, :2], pt[:, :2])
                    max_xy = torch.min(gt[:, 2:], pt[:, 2:])
                    overlap_hw = max_xy - min_xy
                    overlap_hw[overlap_hw < 0] = 0
                    overlap_area = overlap_hw[:, 0] * overlap_hw[:, 1]
                    a_hw = gt[:, 2:] - gt[:, :2]
                    b_hw = pt[:, 2:] - pt[:, :2]
                    a_area = a_hw[:, 0] * a_hw[:, 1]
                    b_area = b_hw[:, 0] * b_hw[:, 1]
                    iou = overlap_area / (a_area + b_area - overlap_area)
                    for index, l in enumerate(labels):
                        if iou[index].item() > 0.4 and label_name == l:
                            hit_num += 1
                            precision_recalls.append({
                                'score': detections[0, cls_idx, det_idx, 0].item(),
                                'flag': True
                            })
                        else:
                            precision_recalls.append({
                                'score': detections[0, cls_idx, det_idx, 0].item(),
                                'flag': False
                            })
                    det_idx += 1
    count = Counter(names)
    pos_num = sum(count.values())
    if predict_pos_num == 0 or pos_num == 0:
        # No detections or no ground truth: metrics undefined; report zeros
        # instead of raising ZeroDivisionError.
        return 0.0, 0.0, 0.0
    # mAP from the PR curve (see the "watermelon book", p.31): sort by score,
    # accumulate TPs, then take the max precision at each recall level.
    prs = []
    tp = 0
    precision_recalls.sort(key=lambda a: a['score'], reverse=True)
    for index, item in enumerate(precision_recalls):
        if item['flag']:
            tp += 1
        prs.append((tp / (index + 1), tp / pos_num))
    prs.sort(key=lambda a: a[1], reverse=False)
    aps = []
    for i in range(len(prs)):
        aps.append(np.array([a[0] for a in prs[i:]]).max())

    return hit_num / predict_pos_num, hit_num / pos_num, np.array(aps).mean()


if __name__ == '__main__':
    # Batch-predict every image in the test-set JPEGImages directory,
    # printing each path as it is processed and saving the annotated plot.
    # (Alternative entry points — accuracy_recall_map() evaluation and
    # predict_new() over raw data directories — can be wired in here.)
    for file_name in os.listdir(images_path):
        current_path = osp.join(images_path, file_name)
        print(current_path)
        predict(current_path, True, 0.6, default_means=True)
# if torch.cuda.is_available():
#     torch.set_default_tensor_type('torch.cuda.FloatTensor')
# def xavier(param):
#     torch.nn.init.xavier_uniform_(param)
#
#
# def weights_init(m):
#     if isinstance(m, torch.nn.Conv2d):
#         xavier(m.weight.data)
#         m.bias.data.zero_()
#
# criterion = MultiBoxLoss(2, 0.5, True, 0, True, 3, 0.5,
#                              False, True)
# dt = VOCDetection(root='/home/ubuntu/code/ssd.pytorch/data/pollen_data/train',
#                   transform=SSDAugmentation(voc['min_dim'], MEANS))
# data_loader = torch.utils.data.DataLoader(dt, num_workers=2,
#                                           batch_size=16,
#                                           shuffle=True, collate_fn=detection_collate,
#                                           pin_memory=True)
#
# ssd_net = build_ssd('train', 300, 2)
# net = ssd_net
# vgg_weights = torch.load('/home/ubuntu/code/ssd.pytorch/weights/vgg16_reducedfc.pth')
# print('Loading base network...')
# ssd_net.vgg.load_state_dict(vgg_weights)
# ssd_net.extras.apply(weights_init)
# ssd_net.loc.apply(weights_init)
# ssd_net.conf.apply(weights_init)
# print(len(data_loader))
# ssd_net.to('cuda')
# for aaa in range(6):
#     for i in data_loader:
#         images, targets = i
#         output = ssd_net(images.cuda())
#         loss_l, loss_c = criterion(output, [j.cuda() for j in targets])
#         print(loss_l,loss_c)
