# -*- coding: utf-8 -*-
from mmdet.apis import init_detector, inference_detector
import mmcv
import os
import cv2
import numpy as np
from scipy import ndimage
import time
import PIL.Image
from PIL import Image, ImageDraw, ImageFont
import os.path as osp
import imgviz
import matplotlib
import collections
import labelme
import uuid
import pycocotools
from shutil import copyfile


class Network:
    """Thin wrapper around an mmdetection SOLO model used for inference.

    NOTE(review): ``config_file`` and ``checkpoint_file`` are module-level
    globals assigned in the ``__main__`` block — the class must be
    instantiated after they are set.
    """

    def __init__(self):
        # Build the detector once on the first CUDA device.
        self.model = init_detector(config_file, checkpoint_file, device='cuda:0')

    def segmentation(self, image_path):
        """Run instance segmentation on the image at *image_path* and return the raw results."""
        return inference_detector(self.model, image_path)


def main(i, result, class_name_to_id, score_thr, sort_by_density=True):
    """Post-process one image's SOLO result and save visualisations.

    Filters instances by confidence, optionally orders them so larger
    masks are drawn first, then builds per-instance masks and writes the
    whole-image overlay.

    Args:
        i: image file name inside the global ``test_path`` directory.
        result: raw output of ``inference_detector`` for this image.
        class_name_to_id: mapping from class name to 1-based label id.
        score_thr: confidence threshold; detections below it are dropped.
        sort_by_density: draw large-area instances before small ones.
    """
    pil_img = PIL.Image.open(test_path + i)
    img = np.array(pil_img)
    w, h = pil_img.size

    if not result or result == [None]:
        return
    cur_result = result[0]
    # Move the result tensors to CPU numpy arrays.
    seg_label = cur_result[0].cpu().numpy().astype(np.uint8)  # binary (0/1) instance masks
    cate_label = cur_result[1].cpu().numpy()
    score = cur_result[2].cpu().numpy()
    # Keep only detections above the confidence threshold.
    keep = score > score_thr
    seg_label = seg_label[keep]
    cate_label = cate_label[keep]
    cate_score = score[keep]
    # Number of surviving instances.
    num_mask = seg_label.shape[0]
    # Draw big instances first so smaller ones stay visible on top.
    if sort_by_density:
        densities = []
        for k in range(num_mask):
            resized = mmcv.imresize(seg_label[k, :, :], (w, h))
            densities.append((resized > 0.5).astype(np.int32).sum())
        order = np.argsort(densities)
        seg_label = seg_label[order]
        cate_label = cate_label[order]
        cate_score = cate_score[order]

    # Build the per-instance mask dict and save the overlay image.
    if num_mask > 0:
        masks = get_masks(seg_label, num_mask, cate_label, cate_score, w, h, class_name_to_id, img)
        vis_save(img, masks, class_name_to_id, base)


def get_masks(seg_label, num_mask, cate_label, cate_score, w, h, class_name_to_id, img):
    """Convert thresholded SOLO outputs into a dict of boolean instance masks.

    Args:
        seg_label: (num_mask, H', W') uint8 array of low-res binary masks.
        num_mask: number of instances in ``seg_label``.
        cate_label: 0-based category index per instance.
        cate_score: confidence score per instance.
        w, h: target (width, height) to resize masks to the original image.
        class_name_to_id: mapping from class name to 1-based label id.
        img: original image array (only used for per-instance visualisation).

    Returns:
        dict mapping ``(class_name, score_percent)`` -> boolean mask (h, w).
    """
    ins_num = 1
    masks = {}  # (label, score_pct) -> full-resolution boolean mask
    # Iterate from the end so the densest masks (sorted last) come first.
    for idx in range(num_mask):
        idx = -(idx + 1)
        cur_mask = seg_label[idx, :, :]
        cur_mask = mmcv.imresize(cur_mask, (w, h))
        cur_mask = (cur_mask > 0.5).astype(np.uint8)
        if cur_mask.sum() == 0:
            continue
        # BUG FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin bool is the documented replacement.
        cur_mask_bool = cur_mask.astype(bool)
        cur_cate = cate_label[idx]  # 0-based category index
        cur_score = cate_score[idx]
        # Map the 0-based index back to its class name (+1 skips background id 0).
        label = "".join([k for k, v in class_name_to_id.items() if v == cur_cate + 1])

        instance = (label, int(cur_score * 100))
        masks[instance] = cur_mask_bool

        # Optionally save every detected instance as its own image.
        if every_ins_viz:
            masks_ins = {instance: cur_mask_bool}
            vis_ins_save(img, masks_ins, class_name_to_id, base, ins_num, label)
            ins_num += 1

    return masks


def vis_ins_save(img, masks_ins, class_name_to_id, base, ins_num, label):
    """Render and save a single detected instance as its own image.

    Args:
        img: original image array.
        masks_ins: dict of ``(class_name, score_percent)`` -> boolean mask
            (normally holding exactly one instance); cleared before return.
        class_name_to_id: mapping from class name to label id.
        base: image file name without extension, used for output paths.
        ins_num: running instance counter used in the output file name.
        label: class name of this instance, used in the output file name.
    """
    viz = img
    # Collect (label_id, caption, mask) triples; captions carry the confidence.
    entries = [
        (class_name_to_id[cnm], cnm + str(":") + str(score), msk)
        for (cnm, score), msk in masks_ins.items()
        if cnm in class_name_to_id
    ]
    # BUG FIX: unpacking zip(*[]) raises ValueError when no class name
    # matched class_name_to_id, so guard on the filtered list instead of
    # the raw dict.
    if entries:
        labels, captions, masks = zip(*entries)
        viz = imgviz.instances2rgb(
            image=img,
            labels=labels,
            masks=masks,
            captions=captions,
            font_size=15,
            line_width=2,
            boundary_width=1,  # contour border width
            alpha=0.5,  # blend ratio with the original image
            colormap=colormap,  # class -> color mapping
        )
    save_ins_dir = osp.join(save_path, "single", base + "/")
    # exist_ok avoids the check-then-create race of the previous version.
    os.makedirs(save_ins_dir, exist_ok=True)
    out_viz_file = osp.join(save_ins_dir, base + '_' + str(label) + '_' + str(ins_num) + '.jpg')
    imgviz.io.imsave(out_viz_file, viz)
    masks_ins.clear()


def vis_save(img, masks, class_name_to_id, base):
    """Render all detected instances on the image and save the overlay.

    Args:
        img: original image array.
        masks: dict of ``(class_name, score_percent)`` -> boolean mask.
        class_name_to_id: mapping from class name to label id.
        base: image file name without extension, used for output paths.
    """
    viz = img
    # Collect (label_id, caption, mask) triples; captions carry the confidence.
    entries = [
        (class_name_to_id[cnm], cnm + str(":") + str(score), msk)
        for (cnm, score), msk in masks.items()
        if cnm in class_name_to_id
    ]
    # BUG FIX: unpacking zip(*[]) raises ValueError when no class name
    # matched class_name_to_id, so guard on the filtered list instead of
    # the raw dict (which only checks non-emptiness, not matching).
    if entries:
        labels, captions, mask_arrays = zip(*entries)
        viz = imgviz.instances2rgb(
            image=img,
            labels=labels,
            masks=mask_arrays,
            captions=captions,
            font_size=15,
            line_width=2,
            boundary_width=1,  # contour border width
            alpha=0.5,  # blend ratio with the original image
            colormap=colormap,  # class -> color mapping
        )
    # Save the whole-image overlay.
    out_viz_file = osp.join(save_path, "whole", base + "_out.jpg")
    imgviz.io.imsave(out_viz_file, viz)
    # Also copy the overlay into the per-image "single" folder.
    if every_ins_viz:
        single_dir = osp.join(save_path, "single", base)
        # BUG FIX: the destination directory is only created by vis_ins_save;
        # ensure it exists so copyfile cannot fail when no instance was saved.
        os.makedirs(single_dir, exist_ok=True)
        copyfile(out_viz_file, osp.join(single_dir, base + "_out.jpg"))


def create_shanghai_label_colormap():
    """Create the label colormap for the Shanghai segmentation benchmark.

    Returns:
        (256, 3) uint8 array; rows 0-15 are the class colors below and the
        remaining rows stay black.
    """
    palette = [
        [0, 0, 0],        # 0: background
        [240, 230, 140],  # 1: Domestic waste (khaki)
        [251, 255, 230],  # 2: Construction waste (white smoke)
        [115, 112, 96],   # 3: forest_empty
        [233, 239, 237],  # 4: road_surface
        [87, 90, 83],     # 5: illegal_building
        [129, 205, 140],  # 6: Interplanting
        [177, 183, 169],  # 7: Uncovered_GH
        [179, 199, 174],  # 8: river
        [154, 167, 175],  # 9: greenhouse
        [121, 121, 118],  # 10: Rubble
        [146, 164, 171],  # 11: Land_cover
        [202, 203, 184],  # 12: Dead trees
        [101, 138, 79],   # 13: leaning-tree
        [226, 221, 215],  # 14: Gravel
        [161, 156, 158],  # 15: forest_empty2
    ]
    colormap = np.zeros((256, 3), dtype=np.uint8)
    colormap[:len(palette)] = np.asarray(palette, dtype=np.uint8)
    return colormap


if __name__ == '__main__':
    # Input images, output root, and model files.
    test_path = "/media/glc/Elements/project/SOLO-TensorRT/shanghai-data/data-2022-0929-SOLO/coco/test2017/"
    save_path = "/media/glc/jack/Project_ShangHai/Flask-Detector-Shanghai-solo-15/Videos/20221007_test/solo_det_1000/"
    config_file = '/media/glc/jack/Project_ShangHai/Flask-Detector-Shanghai-solo-15/models/solov2_up_0929_r50.py'
    checkpoint_file = '/media/glc/jack/Project_ShangHai/Flask-Detector-Shanghai-solo-15/models/epoch_0929_r50_49.pth'
    # Create the output tree on first run.
    if not os.path.exists(save_path):
        os.makedirs(save_path)
        os.makedirs(save_path + "whole")
        os.makedirs(save_path + "single")

    # Class name -> 1-based label id (0 is background).
    class_name_to_id = {
        "_background_": 0, "forest_empty": 1, "road_surface": 2, "illegal_building": 3, "river": 4, "greenhouse": 5
    }

    colormap = create_shanghai_label_colormap()

    det_thr = 0.4  # detection confidence threshold
    sort_by_density = True  # draw large-area instances before small ones
    every_ins_viz = True  # additionally save each detected instance separately
    network = Network()

    for i in os.listdir(test_path):
        base = osp.splitext(osp.basename(i))[0]
        # Time a single segmentation pass.
        start_time = time.time()
        segment_results = network.segmentation(test_path + i)
        t = time.time() - start_time
        print("图片:{} 检测用时: {}秒, FPS={}".format(os.path.basename(i), round(t, 2), round(1 / t, 1)))
        # BUG FIX: the call previously hard-coded sort_by_density=True,
        # silently ignoring the flag configured above.
        main(i, segment_results, class_name_to_id, score_thr=det_thr, sort_by_density=sort_by_density)
