import os

import cv2
import numpy as np
import torch
np.set_printoptions(threshold=np.inf)


class BaseSaver:
    """Base class for visualization savers: writes result images into a
    per-saver subdirectory and provides small image utilities."""

    def save(self, save_dir, name, img):
        """Write `img` under `save_dir/<SaverClassName>/`.

        `name` is expected to be a sequence whose first element is a
        path-like string ("a/b/xxx.ext"); its stem is reused with the
        saver class name appended.
        """
        model_dir = os.path.join(save_dir, self.__class__.__name__)
        # exist_ok=True avoids the race between an existence check and creation
        os.makedirs(model_dir, 0o0777, exist_ok=True)
        out_name = name[0].split("/")[-1][:-4] + "_{}.jpg".format(self.__class__.__name__)
        cv2.imwrite(os.path.join(model_dir, out_name), img)

    @staticmethod
    def channel_transform(img):
        """Swap channels 0 and 2 in place (BGR <-> RGB) and return `img`.

        Fix: the original kept a *view* of channel 2; once channel 2 was
        overwritten the saved values had already changed, so channel 0 was
        written back unchanged and no swap happened. Copying the channel
        makes the swap take effect.
        """
        lc = img[:, :, 2].copy()
        img[:, :, 2] = img[:, :, 0]
        img[:, :, 0] = lc
        return img


class Fuse(BaseSaver):
    """Composite saver: draws bounding boxes, then overlays the heat map."""

    def __init__(self):
        self.process_list = [BBox(), Heatmap()]

    def process(self, img, metric_dict):
        """Feed the image through every stage in order and return the result."""
        for stage in self.process_list:
            img = stage.process(img, metric_dict)
        return img


class BBox(BaseSaver):
    """Draw the first predicted box (red) and first ground-truth box (green)."""

    @staticmethod
    def process(img, metric_dict):
        """Expects `metric_dict['gt_bbox']` / `['predict_bbox']` to hold
        per-image boxes whose first entry is (x1, y1, x2, y2).

        Fix: cv2.rectangle requires integer pixel coordinates; box tensors
        are typically float and `.tolist()` yields floats, which OpenCV >= 4
        rejects — coordinates are rounded to int here.
        """
        gt_boxes = metric_dict['gt_bbox']
        predict_bboxes = metric_dict['predict_bbox']
        x1_, y1_, x2_, y2_ = (int(round(v)) for v in predict_bboxes[0].tolist())
        cv2.rectangle(img, (x1_, y1_), (x2_, y2_), (0, 0, 255), 2)  # red: prediction
        x1, y1, x2, y2 = (int(round(v)) for v in gt_boxes[0].tolist())
        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)  # green: ground truth
        return img


class Heatmap(BaseSaver):
    """Overlay the predicted heat map on the input image (50/50 blend)."""

    @staticmethod
    def process(img, metric_dict):
        resized = get_loc_map(sizes=metric_dict['size'], predict_heatmap=metric_dict['heat_map'])
        # First map, scaled to 8-bit for color mapping. (The original's
        # `[::np.newaxis]` is `[::None]`, a no-op slice, and is dropped.)
        gray = (resized[0] * 255).astype(np.uint8)
        colored = cv2.applyColorMap(gray, cv2.COLORMAP_JET)
        # Result is float; cv2.imwrite casts when the image is saved.
        return 0.5 * img + 0.5 * colored


class Saliency(BaseSaver):
    """Compute a spectral-residual static saliency map of the image."""

    def process(self, img, metric_dict):
        detector = cv2.saliency.StaticSaliencySpectralResidual_create()
        ok, sal_map = detector.computeSaliency(img)
        # Saliency is returned in [0, 1]; rescale to an 8-bit grayscale image.
        return (sal_map * 255).astype("uint8")


class BkgHeatmap(BaseSaver):
    """Overlay the background-class map (key '200_map') on the input image."""

    @staticmethod
    def process(img, metric_dict):
        resized = get_loc_map(sizes=metric_dict['size'], predict_heatmap=metric_dict['200_map'])
        # `[::np.newaxis]` in the original is `[::None]`, a no-op slice.
        gray = (resized[0] * 255).astype(np.uint8)
        colored = cv2.applyColorMap(gray, cv2.COLORMAP_JET)
        return 0.5 * img + 0.5 * colored


class LevelHeatmap(BaseSaver):
    """Render per-level heat maps side by side, ending with the fused map."""

    def __init__(self):
        self.normalize = True

    def process(self, img, metric_dict):
        # NOTE(review): 'level_num' is iterated directly, so it is assumed to
        # be an iterable of level indices — confirm against the caller.
        panels = None
        for idx in metric_dict['level_num']:
            level_map = get_loc_map(sizes=metric_dict['size'],
                                    predict_heatmap=metric_dict['heat_map_level{}'.format(idx)],
                                    normalize=self.normalize)
            panel = self.combine_img(img, level_map)
            panels = panel if panels is None else np.hstack((panels, panel))

        fused = get_loc_map(sizes=metric_dict['size'],
                            predict_heatmap=metric_dict['heat_map'],
                            normalize=self.normalize)
        panels = np.hstack((panels, self.combine_img(img, fused)))
        return panels

    def combine_img(self, img, map):
        # Normalized maps are in [0, 1]; otherwise the map is assumed to be
        # in 8-bit range already.
        scale = 255 if self.normalize else 1
        map = (map[0] * scale).astype(np.uint8)
        return cv2.applyColorMap(map, cv2.COLORMAP_JET)


class AllCls(BaseSaver):
    """Tile per-class activation maps into a grid image.

    Each grid row holds `w` class maps blended with the input image; the
    ground-truth class gets a white border so it stands out.
    """

    def process(self, img, metric_dict, cls_order=False):
        """Build the grid for the 200 class maps.

        cls_order=False keeps class-index order; True sorts classes by
        predicted score (descending) and prints the top/bottom ranking
        for debugging.

        Fix: the original flushed a finished row only via an `else`
        branch, which both skipped every (w+1)-th class map and dropped
        the final row. Rows are now flushed after every w-th map and
        once more after the loop.
        """
        cls_label = metric_dict['gt_cls'][0]
        order = np.flip(np.argsort(metric_dict['predict_cls'][0][:200])) if cls_order else range(200)
        if cls_order:
            print(order[:10], order[-10:])
            print(np.flip(np.sort(metric_dict['predict_cls'][0]))[:10])
        w, bar_width = 20, 10  # maps per row, white-border thickness
        allcls_img, tmp, count = None, None, 0

        for i in order:
            i_map = get_loc_map(sizes=metric_dict['size'],
                                predict_heatmap=metric_dict['{}_map'.format(i)])
            i_img = self.combine_img(img, i_map)
            if i == cls_label:
                # White frame around the ground-truth class map.
                i_img[0:bar_width, :, :] = 255
                i_img[-bar_width:, :, :] = 255
                i_img[:, 0:bar_width, :] = 255
                i_img[:, -bar_width:, :] = 255
            tmp = i_img if tmp is None else np.hstack((tmp, i_img))
            count += 1
            if count == w:
                allcls_img = tmp if allcls_img is None else np.vstack((allcls_img, tmp))
                count, tmp = 0, None

        if tmp is not None:
            # Flush a trailing partial row; vstack needs equal widths, so
            # only stack it when the widths happen to line up.
            if allcls_img is None:
                allcls_img = tmp
            elif tmp.shape[1] == allcls_img.shape[1]:
                allcls_img = np.vstack((allcls_img, tmp))
        return allcls_img

    def combine_img(self, img, map):
        """Blend a single resized class map (JET colormap) into the image."""
        map = (map[0] * 255).astype(np.uint8)
        heatmap = cv2.applyColorMap(map, cv2.COLORMAP_JET)
        return 0.5 * img + 0.5 * heatmap


class OrderCls(AllCls):
    """AllCls variant that orders the class maps by predicted score."""

    def process(self, img, metric_dict, cls_order=True):
        return super().process(img, metric_dict, cls_order=cls_order)


class FeatureMaps(BaseSaver):
    """Tile every channel of a feature map into a grid of JET images."""

    def process(self, img, metric_dict):
        """Expects `metric_dict['featmap'][0]` to be a (C, H, W) stack —
        TODO confirm against the caller.

        Fixes: (1) `featmap[np.newaxis::]` parses as the slice `[None::]`,
        i.e. `[:]`, and never added the leading axis get_loc_map iterates
        over; it is now `featmap[np.newaxis]`. (2) The original flushed a
        finished grid row only via an `else` branch, which skipped every
        (w+1)-th channel and dropped the final row; rows are now flushed
        after every w-th channel and once more after the loop.
        """
        featmaps = metric_dict['featmap'][0]
        w = 32  # channels per grid row
        grid, row, count = None, None, 0

        for featmap in featmaps:
            i_map = get_loc_map(sizes=[torch.tensor(featmap.shape)],
                                predict_heatmap=featmap[np.newaxis])
            i_map = (i_map[0] * 255).astype(np.uint8)
            i_map = cv2.applyColorMap(i_map, cv2.COLORMAP_JET)
            row = i_map if row is None else np.hstack((row, i_map))
            count += 1
            if count == w:
                grid = row if grid is None else np.vstack((grid, row))
                count, row = 0, None

        if row is not None:
            # Trailing partial row: only stack when the widths line up.
            if grid is None:
                grid = row
            elif row.shape[1] == grid.shape[1]:
                grid = np.vstack((grid, row))
        return grid

    def combine_img(self, img, map):
        """Blend a resized map into the image (kept for API parity)."""
        map = (map[0] * 255).astype(np.uint8)
        heatmap = cv2.applyColorMap(map, cv2.COLORMAP_JET)
        return 0.5 * img + 0.5 * heatmap


class SalCamOrigincam(BaseSaver):
    """Show saliency map, refined CAM, and original CAM side by side,
    each blended 50/50 with the input image."""

    @staticmethod
    def process(img, metric_dict):
        def colorize(key):
            # Resize the map, scale to 8-bit, apply JET, blend with the image.
            raw = get_loc_map(sizes=metric_dict['size'], predict_heatmap=metric_dict[key])
            gray = (raw[0] * 255).astype(np.uint8)
            return 0.5 * img + 0.5 * cv2.applyColorMap(gray, cv2.COLORMAP_JET)

        panels = [colorize(k) for k in ('sal_map', 'heat_map', 'origin_heat_map')]
        return np.hstack(panels)


def get_loc_map(sizes, predict_heatmap, normalize=True):
    """Resize each heat map to its original image size.

    Args:
        sizes: sequence of tensors holding (height, width) per image
            (must support `.numpy()`).
        predict_heatmap: stack of 2-D heat maps, iterated in lockstep
            with `sizes`.
        normalize: min-max normalize the whole stack into [0, 1] first.

    Returns:
        np.ndarray stacking the resized maps.

    Fix: a constant heat map made (max - min) zero, so the division
    produced NaNs; the degenerate range is now guarded.
    """
    if normalize:
        max_v = predict_heatmap.max()
        min_v = predict_heatmap.min()
        span = max_v - min_v
        if span > 0:
            predict_heatmap = (predict_heatmap - min_v) / span
        else:
            # Flat map: shift to all zeros instead of dividing by zero.
            predict_heatmap = predict_heatmap - min_v
    loc_map = []
    for size, heatmap in zip(sizes, predict_heatmap):
        # cv2.resize takes (width, height) as the destination size.
        hw = size.numpy()
        dsize = (int(hw[1]), int(hw[0]))
        loc_map.append(cv2.resize(heatmap, dsize, interpolation=cv2.INTER_LINEAR))
    return np.array(loc_map)
