#coding=utf8
# Copyright (c) 2016 Tinydot. inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import torch.nn as nn
import torch
from .instanceseg import create_stack_pos_map, create_pos_map
from PIL import Image
import os
import numpy as np
import contextlib
import io
import json
from torch_cluster import nearest
from .logger import get_logger
from cityscapesscripts.helpers.labels import trainId2label
from torch.distributed import all_reduce
from torch_scatter import scatter
from .cluster import simple_cluster
from .contours import contours_as_label
from random import shuffle
from copy import deepcopy


class ContoursToInstance(nn.Module):
    """Converts contour + center-offset predictions into an instance-id map.

    Pixels are grouped by connected contour regions, each group's mean
    predicted center is computed, and centers closer than ``mini_dis`` are
    merged. The final per-pixel id is encoded as ``class_id * 1000 + k``.
    """

    def __init__(self, mini_dis=5.0, mini_count=0, ct_thresh_hold=0.6, is_center=False):
        """
        Args:
            mini_dis: per-axis minimum distance used when clustering centers.
            mini_count: contour groups with fewer pixels than this are folded
                into the edge mask (0 disables the filtering).
            ct_thresh_hold: threshold applied to the contour channel.
            is_center: if True the two offset channels already hold absolute
                center coordinates; otherwise they are offsets from the pixel.
        """
        super().__init__()
        self.mini_dis = torch.Tensor([mini_dis, mini_dis])
        self.ct_thresh_hold = ct_thresh_hold
        self.mini_count = mini_count
        self.is_center = is_center
        # coordinate grids cached per (shape, device)
        self.pos_cache = {}

    def create_pos(self, shape, device):
        """Return a cached (2, H, W) pixel-coordinate grid for `shape` on `device`."""
        key = "%s-%s" % (shape, device)
        if key not in self.pos_cache:
            _, w, h = shape
            self.pos_cache[key] = torch.from_numpy(create_pos_map(w, h, norm=False)
                                                   ).permute(2, 0, 1).to(device)
        return self.pos_cache[key]

    def _cts_to_label(self, contours, target_pos, cls_one_hot, ins_mask):
        """
        contours: H, W
        target_pos: 2, H, W
        cls_one_hot: C, H, W
        ins_mask: H, W
        """
        if not self.is_center:
            # offsets are relative: add the pixel grid to get absolute centers
            t_target_pos = target_pos + self.create_pos(target_pos.shape, target_pos.device)
        else:
            t_target_pos = target_pos
        t_target_pos = t_target_pos.permute(1, 2, 0)
        ret_instance = torch.zeros_like(contours).int()
        cts_labels, cts_map = contours_as_label(contours, self.ct_thresh_hold)
        edge_mask = (cts_map > 0) & ins_mask
        # unique contour-group ids inside the instance mask
        u, idx, c = torch.unique(cts_labels[ins_mask], return_inverse=True, return_counts=True)
        if self.mini_count > 0:
            # FIX: fold too-small groups into the edge mask with a vectorized
            # torch comparison. The old code built `small_items` with
            # np.zeros_like on a torch tensor plus a Python loop (fails on
            # CUDA tensors, slow) and relied on `|`/`==` precedence.
            small_items = c < self.mini_count
            edge_mask[ins_mask] = edge_mask[ins_mask] | small_items[idx]
            # ignore small item instance mask
            ins_t_mask = ins_mask & ~edge_mask
            _, r_idx = torch.unique(cts_labels[ins_t_mask], return_inverse=True)
        else:
            ins_t_mask = ins_mask
            r_idx = idx
        # mean predicted center per contour group
        target_centers = scatter(t_target_pos[ins_t_mask], r_idx,  dim=-2, reduce="mean")
        # edge pixels adopt the id of the nearest group center
        labels_edge = nearest(t_target_pos[edge_mask], target_centers)
        # merge centers closer than mini_dis into the same instance label
        labels_target = simple_cluster(target_centers, self.mini_dis.to(t_target_pos.device))[0]
        # majority class vote per group from the one-hot class channels
        labels_cls = scatter(cls_one_hot.permute(1, 2, 0)[ins_t_mask], r_idx, dim=0, reduce="sum").argmax(-1)

        instance_mask_id = labels_cls*1000 + labels_target % 1000
        instance_edge_id = instance_mask_id[labels_edge]
        ret_instance[ins_t_mask] = instance_mask_id[r_idx].int()
        ret_instance[edge_mask] = instance_edge_id.int()
        return ret_instance

    def forward(self, predictions, ins_mask):
        """
        predictions: B, C+2+1, H, W  (C class channels, 2 offset channels,
            then 1 contour channel, in that order)
        ins_mask: B, H, W
        """
        b, c, _, _ = predictions.shape
        class_nu = c - 3
        return torch.stack([self._cts_to_label(
            predictions[i][-1, :, :],
            predictions[i][class_nu:class_nu+2, :, :],
            predictions[i][:class_nu, :, :],
            ins_mask[i],
        ) for i in range(b)])


class CenterToInstance(nn.Module):
    """Groups foreground pixels into instances by clustering their predicted
    center coordinates and assigning each pixel to the nearest cluster mean.
    """

    def __init__(self, mini_count, mini_dis):
        super().__init__()
        self.mini_count = mini_count
        self.mini_dis = torch.Tensor([mini_dis, mini_dis])

    def _label_cts(self, center_map, cls_seg):
        """
        center_map: 2, W, H
        cls_seg: W, H
        return: W, H
        """
        instance_map = torch.zeros_like(cls_seg).int()
        foreground = cls_seg > 0
        candidate_pts = center_map.permute(1, 2, 0)[foreground]
        # keep only center coordinates voted for by enough pixels
        uniq_pts, votes = torch.unique(candidate_pts.int(), return_counts=True, dim=0)
        kept_pts = uniq_pts[votes > self.mini_count]
        # merge kept points belonging to the same cluster; cap at 900 instances
        _, cluster_idx = torch.unique(simple_cluster(kept_pts, self.mini_dis)[0],
                                      return_inverse=True)
        centers = scatter(kept_pts, cluster_idx, dim=-2, reduce="mean")[:900]
        # each instance inherits the class of the pixel under its center
        center_cls = cls_seg[(centers[:, 0].long(), centers[:, 1].long())]
        assignment = nearest(candidate_pts.float(), centers.float())
        instance_map[foreground] = (assignment + 1 + 1000 * center_cls[assignment]).int()
        return instance_map

    def forward(self, cts, cls_seg):
        """
        inputs:
            offset: B, 2, W, H
            cls_seg: B, W, H
        return:
            B, W, H
        """
        per_image = [self._label_cts(c_map, seg) for c_map, seg in zip(cts, cls_seg)]
        return torch.stack(per_image)


class OffsetToInstance(nn.Module):
    """Converts per-pixel center-offset predictions into an instance-id map.

    Pixels whose offset norm is a local minimum (found via max-pooling the
    inverted norm) become instance centers; every foreground pixel is then
    assigned to its nearest center and encoded as ``class_id * 1000 + k``.
    """

    def __init__(self, pool_size, max_norm=None):
        # pool_size: window for the non-maximum-suppression of center peaks.
        # max_norm: if set, pixels whose offset norm exceeds it cannot be centers.
        super().__init__()
        self.max_norm = max_norm
        padding = (pool_size - 1) // 2
        self.max_pool = nn.MaxPool2d(pool_size, stride=1, padding=padding)
        # stacked coordinate grids cached per (shape, device)
        self.pos_cache = {}

    def create_pos(self, shape, device):
        """Return a cached (B, 2, H, W) pixel-coordinate grid for `shape` on `device`."""
        key = "%s-%s" % (shape, device)
        if key not in self.pos_cache:
            b, _, w, h = shape
            self.pos_cache[key] = torch.from_numpy(create_stack_pos_map(w,
                                                                        h,
                                                                        b, norm=False)
                                                    ).permute(0, 3, 1, 2).to(device)
        return self.pos_cache[key]

    def forward(self, offset, cls_seg):
        """
        offset: B, C, W, H
        cls_seg: B, W, H
        """
        # find center
        with torch.no_grad():
            batch, _, w, h = offset.shape
            # NOTE(review): max_value is taken over ALL dims (incl. batch and
            # channel); presumably a spatial dim is always largest — confirm.
            max_value = max(offset.shape)
            ign_value = 2*max_value
            cls_seg_background = cls_seg == 0
            assert isinstance(offset, torch.Tensor)
            of_center = torch.zeros_like(cls_seg, requires_grad=False)
            of_norm = offset.norm(dim=1)
            if self.max_norm:
                # pixels pointing further than max_norm are disqualified as centers
                of_zero = of_norm > self.max_norm
            # invert so that small offsets become local maxima
            of_norm = max_value - of_norm
            of_norm[cls_seg_background] = ign_value    # make edges max to ign_value
            of_norm = of_norm.unsqueeze(1)
            # non-maximum suppression: a peak equals the max of its window
            of_peak = self.max_pool(of_norm) == of_norm
            assert isinstance(of_peak, torch.Tensor)
            of_peak = of_peak.squeeze(1)
            of_center[of_peak] = 1
            if self.max_norm:
                of_center[of_zero] = 0
            of_center[cls_seg_background] = 0
            # group to center
            instances = torch.zeros([batch, w, h], dtype=torch.int,
                                    device=offset.device, requires_grad=False)
            # absolute predicted center position per pixel (offset + pixel grid)
            offset_unr = offset + self.create_pos(offset.shape, offset.device)
            offset_unr = offset_unr.permute(0, 2, 3, 1)
            for b in range(batch):
                # center: [K, 2] -> [K, 1, 2]
                # points: [H*W, 2] -> [1, H*W, 2]
                slc_pos = cls_seg[b] > 0
                # cap at 900 centers so instance ids stay below 1000
                slc_cts = torch.nonzero(of_center[b] > 0)[:900]
                slc_cls = cls_seg[b][of_center[b] > 0][:900]
                if slc_cts.shape[0] == 0:
                    continue
                assert slc_cts.shape[0] == slc_cls.shape[0], "0 dim: %s != %s" % (slc_cts.shape, slc_cls.shape)
                slc_pts = offset_unr[b][slc_pos].view(-1, 2)
                # distance: [K, H*W]
                index = nearest(slc_pts.float(), slc_cts.float())
                instances[b][slc_pos] = (index + 1 + 1000*slc_cls[index]).int()
            # ins cls
            return instances


class CityscapsPanopticVal(object):
    """Accumulates panoptic predictions as Cityscapes-format PNGs plus JSON
    metadata and evaluates them with the official cityscapesscripts tooling.
    """

    def __init__(self, gt_json_file, gt_dir, predict_dir,
                 pool_size=5, max_norm=None,
                 use_center=False, mini_count=10, mini_dis=5,
                 use_train_id=True):
        """
        Args:
            gt_json_file: path of the ground-truth panoptic JSON.
            gt_dir: directory with ground-truth panoptic images.
            predict_dir: output directory for prediction PNGs/JSON.
            pool_size, max_norm: forwarded to OffsetToInstance.
            use_center: use CenterToInstance instead of OffsetToInstance.
            mini_count, mini_dis: forwarded to CenterToInstance.
            use_train_id: emit Cityscapes train ids when True, otherwise
                convert them to raw label ids.
        """
        self.gt_dir = gt_dir
        self.gt_json_file = gt_json_file
        if not use_center:
            self.of2ins = OffsetToInstance(pool_size, max_norm)
        else:
            self.of2ins = CenterToInstance(mini_count=mini_count, mini_dis=mini_dis)
        self.predict_dir = predict_dir
        self.predict_json_file = os.path.join(predict_dir, "predictions.json")
        # image_id -> {'image_id', 'file_name', 'segments_info'}
        self.predictions = {}
        self._resultsFile = os.path.join(predict_dir, 'result.json')
        self.use_train_id = use_train_id
        self.logger = get_logger(self.__class__.__name__)

    @staticmethod
    def id2rgb(id_map):
        """Encode an id map in the COCO-panoptic RGB scheme
        (id = R + G*256 + B*256**2).

        A numpy array yields an (..., 3) uint8 array; any other value that
        supports % and // yields a list of three channel values.
        """
        if isinstance(id_map, np.ndarray):
            id_map_copy = id_map.copy()
            rgb_shape = tuple(list(id_map.shape) + [3])
            rgb_map = np.zeros(rgb_shape, dtype=np.uint8)
            for i in range(3):
                rgb_map[..., i] = id_map_copy % 256
                id_map_copy //= 256
            return rgb_map
        # FIX: rebind instead of `//=` so tensor inputs are not mutated
        # in place (in-place floor-divide clobbered the caller's tensor).
        color = []
        remainder = id_map
        for _ in range(3):
            color.append(remainder % 256)
            remainder = remainder // 256
        return color

    def update_result(self, predictions: torch.Tensor,
                      target_files, min_pixel_count=2048,
                      ignore_fc=None, target_size=None):
        """
        predictions: [B, C, W, H], C = cls[19] + offset[2]

        Converts every prediction into an instance-id image, saves it as a
        panoptic PNG and records its segment metadata in self.predictions.

        Args:
            target_files: per-image "*_gtFine_instanceIds*" file names used to
                derive image ids.
            min_pixel_count: segments smaller than this are relabeled to 0
                (only used when `ignore_fc` is not callable).
            ignore_fc: optional callable (id, pixel_count) -> bool marking
                segments to drop.
            target_size: optional (H, W) the saved PNG is resized to.
        """
        # NOTE: the 19-class split and the `cls > 10` thing-threshold are
        # Cityscapes-specific (train ids 11..18 are "thing" classes).
        offset = predictions[:, 19:, :, :]
        cls = predictions[:, :19, :, :].argmax(1).int()
        ins_mask = (cls > 10).int()
        # offset: [b, c, w, h]; cls: [b, w, h]; ins_mask: [b, w, h]
        assert len(offset.shape) == 4
        assert len(cls.shape) == 3
        assert len(ins_mask.shape) == 3
        assert len(target_files) == offset.shape[0]
        assert isinstance(target_files, (list, tuple))
        cls_seg = cls.clone()
        ins_cls = cls.clone()
        cls_seg[ins_mask == 0] = 0
        instances = self.of2ins(offset, cls_seg)
        ins_cls[ins_mask > 0] = instances[ins_mask > 0]
        for b, ip_name in enumerate(target_files):
            im_data = ins_cls[b].cpu().detach().numpy()
            im_id = ip_name.split("_gtFine_instanceIds")[0]
            f_name = "%s_panoptic.png" % im_id
            f_path = os.path.join(self.predict_dir, f_name)
            # skip already-processed images (supports resumed/distributed runs)
            if im_id in self.predictions:
                continue
            if os.path.isfile(f_path):
                continue
            segments_info = []
            has_ignore_label = False
            set_ignore_label = False
            id_list = []
            for u, c in zip(*np.unique(im_data, return_counts=True)):
                if callable(ignore_fc):
                    if ignore_fc(u, c):
                        im_data[im_data == u] = 0
                        has_ignore_label = True
                        continue
                elif c < min_pixel_count:
                    im_data[im_data == u] = 0
                    has_ignore_label = True
                    continue
                # ids >= 1000 encode class_id * 1000 + instance index
                if u < 1000:
                    pred_class = u
                else:
                    pred_class = u // 1000
                assert pred_class < 19, "class id[%d] error" % pred_class
                if not self.use_train_id:
                    # FIX: trainId2label maps to a Label namedtuple; the raw
                    # label id is its `.id` field — int(Label) raised TypeError.
                    pred_class = trainId2label[pred_class].id
                if u == 0:
                    set_ignore_label = True
                id_list.append(int(u))
                segments_info.append(
                    {
                        'id': int(u),
                        'category_id': int(pred_class),
                    }
                )
            # segments were zeroed out but id 0 was never recorded: add it
            if has_ignore_label and not set_ignore_label:
                segments_info.append({'id': 0,
                                      'category_id': 0 if self.use_train_id else trainId2label[0].id})
                id_list.append(0)
            for u in np.unique(im_data):
                assert int(u) in id_list, "%d not in %s" % (int(u), id_list)
            im_data = self.id2rgb(im_data)
            img_to_save = Image.fromarray(im_data.astype(np.uint8))
            if target_size:
                target_h, target_w = target_size
                img_to_save = img_to_save.resize((target_w, target_h), resample=Image.NEAREST)
            img_to_save.save(f_path)
            self.predictions[im_id] = {
                    'image_id': im_id,
                    'file_name': f_name,
                    'segments_info': segments_info,
            }
        # End

    def save_seg_meta(self, rank_id):
        """Dump this rank's accumulated predictions to a per-rank JSON file."""
        info_file = "dis_seg_meta_%d.json" % rank_id
        info_path = os.path.join(self.predict_dir, info_file)
        with open(info_path, "w+") as f:
            f.write(json.dumps(self.predictions, indent=2))

    def _get_seg_meta_files(self):
        """List the per-rank meta files currently present in predict_dir."""
        ret = []
        for f in os.listdir(self.predict_dir):
            if "dis_seg_meta_" in f:
                ret.append(os.path.join(self.predict_dir, f))
        return ret

    def load_seg_meta(self):
        """Merge all ranks' meta files into self.predictions."""
        seg_meta = {}
        for f in self._get_seg_meta_files():
            with open(f) as fp:
                seg_meta.update(json.load(fp))
        self.logger.debug("\n load %d images" % len(seg_meta))
        self.predictions = seg_meta

    def clear_seg_meta(self):
        """Delete per-rank meta files, retrying until none remain."""
        while True:
            meta_files = self._get_seg_meta_files()
            if len(meta_files) < 1:
                break
            for f in meta_files:
                os.remove(f)
        # End

    def evaluate(self):
        """Write predictions.json and run the official panoptic evaluation.

        Returns the cityscapesscripts result dict (also printed).
        """
        import cityscapesscripts.evaluation.evalPanopticSemanticLabeling as cityscapes_eval
        with open(self.gt_json_file, "r") as f:
            json_data = json.load(f)
        categories = {el['id']: el for el in json_data['categories']}
        # reuse the gt JSON structure, swapping in our annotations
        json_data["annotations"] = [v for k, v in self.predictions.items()]
        with open(self.predict_json_file, "w") as f:
            f.write(json.dumps(json_data, indent=2))
        # the evaluator prints a lot; silence it and print a summary instead
        with contextlib.redirect_stdout(io.StringIO()):
            results = cityscapes_eval.evaluatePanoptic(self.gt_json_file, self.gt_dir,
                                                       self.predict_json_file, self.predict_dir,
                                                       self._resultsFile)
        cityscapes_eval.print_results(results, categories)
        return results


class CLassIouVal(object):
    """Streaming semantic-segmentation metrics over a confusion matrix.

    re-write from: https://github.com/PingoLH/FCHarDNet/blob/master/ptsemseg/metrics.py
    """

    def __init__(self, n_classes, global_rank=None, device=None):
        """
        Args:
            n_classes: number of classes.
            global_rank: this process's rank when running distributed (enables
                an all_reduce of the confusion matrix); None for single-process.
            device: device holding the confusion matrix (default "cpu").
        """
        self.n_classes = n_classes
        self.global_rank = global_rank
        self.device = device
        self.confusion_matrix = torch.zeros((n_classes, n_classes),
                                            device=device if device else "cpu")

    def _fast_hist(self, label_true, label_pred, n_class):
        """Confusion matrix of two flat label tensors (rows=truth, cols=pred)."""
        # labels outside [0, n_class) are ignored
        mask = (label_true >= 0) & (label_true < n_class)
        hist = torch.bincount(
            n_class * label_true[mask].int() + label_pred[mask], minlength=n_class ** 2
        ).view([n_class, n_class])
        return hist

    def update(self, label_trues, label_preds):
        """Accumulate a batch of [B, W, H] integer label maps."""
        assert label_preds.shape == label_trues.shape
        b, w, h = label_preds.shape
        for i in range(b):
            self.confusion_matrix += self._fast_hist(label_trues[i].view(-1),
                                                     label_preds[i].view(-1), self.n_classes)

    def get_scores(self, cls_to_name=None):
        """Returns accuracy score evaluation result.
            - overall accuracy
            - mean accuracy
            - mean IU
            - fwavacc
        Resets the accumulated confusion matrix afterwards.
        """
        if self.global_rank is not None:
            # FIX: all_reduce is an in-place collective that returns None, so
            # the old `.cpu().detach().numpy()` chain crashed. Also the old
            # `if self.global_rank:` test skipped rank 0, which would deadlock
            # the collective (every rank must participate).
            all_reduce(self.confusion_matrix)

        hist = self.confusion_matrix.cpu().detach().numpy()
        acc = np.diag(hist).sum() / hist.sum()
        acc_cls = np.diag(hist) / hist.sum(axis=1)
        acc_cls = np.nanmean(acc_cls)
        iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
        mean_iu = np.nanmean(iu)
        freq = hist.sum(axis=1) / hist.sum()
        fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
        cls_iu = dict(zip([cls_to_name[_] if cls_to_name else _ for _ in range(self.n_classes)], iu))
        self.reset()
        return (
            {
                "Overall Acc": acc,
                "Mean Acc":    acc_cls,
                "FreqW Acc":   fwavacc,
                "Mean IoU":    mean_iu,
            },
            cls_iu,
        )

    def reset(self):
        """Zero the confusion matrix."""
        self.confusion_matrix[...] = 0


class CityscapsIouVal(CLassIouVal):
    """CLassIouVal preset for the 19 Cityscapes train-id classes."""

    def __init__(self, global_rank=None, device=None):
        super().__init__(19, global_rank, device)

    def get_scores(self, cls_to_name=None):
        # FIX: the old default passed trainId2label itself, so the per-class
        # IoU dict was keyed by Label namedtuples; key it by the readable
        # class name instead.
        return super().get_scores(
            cls_to_name or {k: v.name for k, v in trainId2label.items()})


class LabelPeak(nn.Module):
    def __init__(self, pool_size=5, min_label=1,
                 max_label=255, label_gap=1, peak_value=0.5,
                 emb_channel=8,
                 emb_radius=0):
        super().__init__()
        self.pool_size = pool_size
        self.label_gap = label_gap
        self.min_label = min_label
        self.max_label = max_label
        self.peak_value= peak_value
        self.emb_channel = emb_channel
        self.emb_radius= emb_radius
        self.label_list = [i for i in range(min_label, max_label, label_gap)]
        padding = (pool_size - 1) // 2
        self.max_pool = nn.MaxPool2d(pool_size, stride=1, padding=padding)
        shuffle(self.label_list)

    def heat_to_peak(self, x: torch.Tensor):
        with torch.no_grad():
            peak = (self.max_pool(x) == x) & (x > self.peak_value)
            assert isinstance(peak, torch.Tensor)
            ret_label = torch.zeros_like(x, device=x.device)
            b, h, w = ret_label.shape
            for i in range(b):
                unique_labels = deepcopy(self.label_list)
                target = ret_label[i][peak[i]]
                end = min(target.shape[0], len(unique_labels))
                target[: end] = torch.Tensor(unique_labels[:end])
                ret_label[i][peak[i]] = target
            shuffle(self.label_list)
            return ret_label

    def peak_to_ins_label(self, peak_pred_label, gt_instance, gt_peak):
        """
        peak_map: B, H, W
        instance_map: B, H, W
        centers: B, H, W
        """
        with torch.no_grad():
            gt_ct = (self.max_pool(gt_peak) == gt_peak) & (gt_peak > self.peak_value)
            assert isinstance(gt_ct, torch.Tensor)
            rt_instance = torch.zeros_like(peak_pred_label)
            b, _, _ = peak_pred_label.shape
            for i in range(b):
                # gt: pos and val
                gt_ct_pos = torch.nonzero(gt_ct[i])
                gt_ct_val = gt_instance[i][(gt_ct_pos[:, 0], gt_ct_pos[:, 1])]
                gt_ct_val_pos = {
                    int(v): gt_ct_pos[_] for _, v in enumerate(gt_ct_val)
                }
                # calc inter
                for u in gt_instance[i].unique():
                    ins_mask = gt_instance[i] == u
                    # find nearest
                    # pred: pos and val
                    pred_ct_pos = torch.nonzero((peak_pred_label[i] > 0) & ins_mask)
                    if pred_ct_pos.shape[0] < 1:
                        continue
                    pred_ct_val = peak_pred_label[i][(pred_ct_pos[:, 0], pred_ct_pos[:, 1])]
                    ct_distance = gt_ct_val_pos[int(u)] - pred_ct_pos
                    rt_instance[i][ins_mask] = pred_ct_val[ct_distance.float().norm(dim=-1).argmin()]
            return rt_instance

    @staticmethod
    def label_as_embed(value, channel):
        tmp_value = [0 for _ in range(channel)]
        v = int(value)
        for i in range(channel):
            tmp_value[i] = v % 2
            v = v // 2
            if v == 0:
                break
        target = torch.Tensor(tmp_value,
                              device=value.device).to(value.device)
        return target

    @staticmethod
    def embed_as_label(embedded_value):
        assert isinstance(embedded_value, torch.Tensor)
        if len(embedded_value.shape) == 1:
            channel = embedded_value.shape[0]
            tmp_value = torch.Tensor([2 ** i for i in range(channel)])
            ret = embedded_value * tmp_value
            return ret.sum(dim=-1)
        else:
            _, channel, _, _ = embedded_value.shape
            tmp_value = torch.Tensor([2 ** i for i in range(channel)])
            ret = embedded_value.permute(0, 2, 3, 1) * tmp_value
            return ret.sum(dim=-1)

    def embedding_peak_label(self, peak_pred_label, emb_radius=None, emb_channel=None):
        """
        peak_pred_label: B, H, W
        """
        channel = emb_channel or self.emb_channel
        radius = emb_radius or self.emb_radius
        assert 2**channel - 1 > peak_pred_label.max(), "%d < %d" % (2**channel - 1,
                                                                    peak_pred_label.max())
        assert isinstance(peak_pred_label, torch.Tensor)
        assert radius >= 0
        # tmp vars
        b, h, w = peak_pred_label.shape
        batch_ret_embedded = torch.zeros((b, h, w, channel), dtype=torch.float32,
                                         device=peak_pred_label.device)
        batch_labeled_mask = torch.zeros_like(peak_pred_label).bool()
        batch_tmp_mask = torch.zeros_like(peak_pred_label).bool()
        for i in range(b):
            positions = torch.nonzero(peak_pred_label[i])
            values = peak_pred_label[i][(positions[:, 0], positions[:, 1])]
            labeled_mask = batch_labeled_mask[i]
            tmp_mask = batch_tmp_mask[i]
            for idx, p in enumerate(positions):
                y, x = p[0], p[1]
                tmp_mask[y - radius: y + 1 + radius, x - radius: x + 1 + radius] = True
                batch_ret_embedded[i][tmp_mask & ~labeled_mask] = self.label_as_embed(values[idx],
                                                                                      channel)
                labeled_mask = labeled_mask | tmp_mask
        return batch_ret_embedded.permute(0, 3, 1, 2)

    def heat_to_voronoi(self, heat_map):
        """
        heat_map: B, H, W
        """
        with torch.no_grad():
            peak = (self.max_pool(heat_map) == heat_map) & (heat_map > self.peak_value)
            assert isinstance(peak, torch.Tensor)
            ret_label = torch.zeros_like(heat_map, device=heat_map.device)
            b, h, w = ret_label.shape
            for i in range(b):
                unique_labels = deepcopy(self.label_list)
                target = ret_label[i][peak[i]]
                end = min(target.shape[0], len(unique_labels))
                target[: end] = torch.Tensor(unique_labels[:end])
                ret_label[i][peak[i]] = target
                # make voronoi
                peak_pos = torch.nonzero(peak[i])
                peak_val = ret_label[i][(peak_pos[:, 0], peak_pos[:, 1])]
                near_pos = torch.nonzero(~peak[i])
                near_to_target = nearest(near_pos.float(), peak_pos.float())
                ret_label[i][(near_pos[:, 0], near_pos[:, 1])] = peak_val[near_to_target]
            shuffle(self.label_list)
            return ret_label

    def voronoi_to_ins_label(self, vr_map, gt_instance, gt_peak):
        with torch.no_grad():
            gt_ct = (self.max_pool(gt_peak) == gt_peak) & (gt_peak > self.peak_value)
            assert isinstance(gt_ct, torch.Tensor)
            rt_instance = torch.zeros_like(vr_map)
            b, _, _ = vr_map.shape
            for i in range(b):
                gt_ct_pos = torch.nonzero(gt_ct[i])
                gt_ct_val = gt_instance[i][(gt_ct_pos[:, 0], gt_ct_pos[:, 1])]
                vr_ct_val = vr_map[i][(gt_ct_pos[:, 0], gt_ct_pos[:, 1])]
                for idx, u in enumerate(gt_ct_val):
                    rt_instance[i][gt_instance[i] == u] = vr_ct_val[idx]
            return rt_instance

    def embedding_ins_label(self, ins_label, emb_channel=None):
        with torch.no_grad():
            assert isinstance(ins_label, torch.Tensor)
            channel = emb_channel or self.emb_channel
            assert 2**channel >= ins_label.max()
            b, h, w = ins_label.shape
            embedding_ins = torch.zeros([b, h, w, channel],
                                        dtype=ins_label.dtype, device=ins_label.device)
            for u in ins_label.unique():
                if u == 0:
                    continue
                embedding_ins[ins_label == u] = self.label_as_embed(u, channel)
            return embedding_ins.permute(0, 3, 1, 2)
