# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name： prepare_denoise
Description :
Author : 'li'
date： 2022/6/17
Change Activity:
2022/6/17:
-------------------------------------------------
"""
import torch


class DenoisePrepare:
    """Build denoising (DN) queries for DN-DETR style training.

    In training mode the ground-truth labels/boxes of every image are
    duplicated ``scalar`` times (one copy per denoising group), perturbed
    with label/box noise, embedded, and prepended to the ordinary matching
    queries.  An attention mask keeps the denoising groups from seeing each
    other and keeps the matching queries from seeing any denoising query.
    In inference mode only the plain matching queries are produced.
    """

    def __init__(self, targets: list, embedding_weight, scalar=5, training=True,
                 label_noise_scale=0.2, box_noise_scale=0.4, num_patterns=1,
                 num_queries=300, num_classes=91, hidden_dim=256,
                 box_embedding_dim=4, label_enc=None):
        """
        Args:
            targets: per-image ground-truth dicts, each with 'labels'
                (LongTensor [n]) and 'boxes' (FloatTensor [n, 4];
                presumably normalized cxcywh in [0, 1] — the box noise
                clamps to that range).
            embedding_weight: learnable box query embedding, shape [num_queries, 4].
            scalar: number of denoising groups (noisy copies of the GT set).
            training: True builds the denoising part, False builds plain
                inference queries.
            label_noise_scale: probability of flipping a GT label to a random class.
            box_noise_scale: relative magnitude of the box center/size jitter.
            num_patterns: number of query patterns (anchor-DETR style).
            num_queries: number of matching queries.
            num_classes: number of object classes; embedding index
                ``num_classes`` is used as the "non object" label for
                matching queries.
            hidden_dim: transformer hidden size; the label embedding dim is
                hidden_dim - 1 (the last channel is a denoise/matching indicator).
            box_embedding_dim: box coordinate dimension (cx, cy, w, h -> 4).
            label_enc: label embedding module, e.g.
                nn.Embedding(num_classes + 1, hidden_dim - 1).
        """
        self.targets = targets
        self.embedding_weight = embedding_weight  # learned box query embedding
        self.batch_size = len(targets)
        self.scalar = scalar
        self.training = training
        self.label_noise_scale = label_noise_scale
        self.box_noise_scale = box_noise_scale
        self.num_patterns = num_patterns
        self.num_queries = num_queries
        self.num_classes = num_classes
        self.hidden_dim = hidden_dim
        self.label_enc = label_enc
        self.box_embedding_dim = box_embedding_dim
        # All freshly created tensors follow the box embedding's device instead
        # of a hard-coded .cuda(), so the module also runs on CPU-only machines.
        # NOTE(review): assumes targets' tensors live on the same device.
        self.device = embedding_weight.device

    def __call__(self):
        """Build the query tensors.

        Returns:
            (input_query_label, input_query_bbox, attention_mask, mask_dict);
            the query tensors are transposed to [queries, batch, dim] as the
            transformer decoder expects; attention_mask and mask_dict are
            None in inference mode.
        """
        if self.training:
            input_query_label, input_query_bbox, attention_mask, mask_dict = self._build_training_denoise()
        else:
            input_query_label, input_query_bbox, attention_mask, mask_dict = self._build_inference()
        # (batch, queries, dim) -> (queries, batch, dim)
        input_query_label = input_query_label.transpose(0, 1)
        input_query_bbox = input_query_bbox.transpose(0, 1)
        return input_query_label, input_query_bbox, attention_mask, mask_dict

    @staticmethod
    def inverse_sigmoid(x, eps=1e-3):
        """Numerically stable inverse of sigmoid (logit).

        Clamping by ``eps`` keeps the result finite for inputs at exactly
        0 or 1, which the box-noise clamp can produce.
        """
        x = x.clamp(min=0.0, max=1.0)
        return torch.log(x.clamp(min=eps) / (1 - x).clamp(min=eps))

    def _build_inference(self):
        """Plain matching queries only: no noise, no attention mask."""
        num_tgt = self.num_queries * self.num_patterns
        # Indicator channel: 0 marks a matching (non-denoise) query.
        indicator0 = torch.zeros([num_tgt, 1], device=self.device)
        non_object = self.label_enc(torch.tensor(self.num_classes, device=self.device))
        # Every matching query starts from the "non object" label embedding.
        tgt = torch.cat([non_object.repeat(num_tgt, 1), indicator0], dim=1)
        embedding_weight_repeat = self.embedding_weight.repeat(self.num_patterns, 1)
        input_query_label = tgt.repeat(self.batch_size, 1, 1)
        input_query_bbox = embedding_weight_repeat.repeat(self.batch_size, 1, 1)
        return input_query_label, input_query_bbox, None, None

    def _scalar_targets(self, concat_labels_index, concat_labels, concat_image_index, concat_boxes):
        """Repeat the concatenated GT ``scalar`` times (one copy per denoise group).

        Args:
            concat_labels_index: index of every GT box in the concatenated layout.
            concat_labels: concatenated GT labels of the whole batch.
            concat_image_index: image index of every GT box.
            concat_boxes: concatenated GT boxes of the whole batch.

        Returns:
            The four inputs repeated ``scalar`` times, plus clones of the
            repeated labels/boxes that will receive the noise (the originals
            stay clean for loss computation).
        """
        scalar_concat_labels_index = concat_labels_index.repeat(self.scalar, 1).view(-1)
        scalar_concat_labels = concat_labels.repeat(self.scalar, 1).view(-1)
        scalar_concat_image_index = concat_image_index.repeat(self.scalar, 1).view(-1)
        scalar_concat_boxes = concat_boxes.repeat(self.scalar, 1)
        noise_labels = scalar_concat_labels.clone()
        noise_boxes = scalar_concat_boxes.clone()
        return (scalar_concat_labels_index, scalar_concat_labels, scalar_concat_image_index,
                scalar_concat_boxes, noise_labels, noise_boxes)

    def _concat_targets(self, gt_boxes_flag):
        """Concatenate the per-image targets into flat batch-wide tensors.

        Args:
            gt_boxes_flag: per-image 1-flags marking real GT boxes.

        Returns:
            (concat_labels, concat_boxes, concat_image_index, concat_labels_index)
        """
        concat_labels = torch.cat([t['labels'] for t in self.targets])
        concat_boxes = torch.cat([t['boxes'] for t in self.targets])
        # Image index of every GT box, e.g. [0, 0, 1, 1, 1, 1].
        concat_image_index = torch.cat(
            [torch.full_like(t['labels'].long(), i) for i, t in enumerate(self.targets)])
        concat_labels_index = torch.nonzero(torch.cat(gt_boxes_flag))
        return concat_labels, concat_boxes, concat_image_index, concat_labels_index

    def _add_noise_to_label(self, noise_labels):
        """Flip roughly ``label_noise_scale`` of the labels to a random class.

        Mutates ``noise_labels`` in place (it is a clone) and returns it.
        """
        p = torch.rand_like(noise_labels.float())
        chosen_index = torch.nonzero(p < self.label_noise_scale).view(-1)
        new_label = torch.randint_like(chosen_index, 0, self.num_classes)
        noise_labels.scatter_(0, chosen_index, new_label)
        return noise_labels

    def get_padding_per_image(self, boxes_number_per_img):
        """Slot index of every GT box inside the padded per-image layout.

        Each image is padded to max(boxes_number_per_img) slots per denoise
        group, so box b of any image in group g occupies slot
        b + g * max_size; e.g. counts [2, 3] -> [0, 1, 0, 1, 2] per group.
        """
        padding_index_per_image = torch.tensor([])
        if len(boxes_number_per_img) > 0:
            # max() is only evaluated under the guard so an empty batch
            # returns the empty tensor instead of raising ValueError.
            max_label_size = int(max(boxes_number_per_img))
            padding_index_per_image = torch.cat(
                [torch.arange(int(num)) for num in boxes_number_per_img])
            padding_index_per_image = torch.cat(
                [padding_index_per_image + max_label_size * i for i in range(self.scalar)]).long()
        return padding_index_per_image

    def _build_training_denoise(self):
        """Build noisy GT queries, padded query tensors and the group attention mask.

        Returns:
            (input_query_label, input_query_bbox, attention_mask, mask_dict)
        """
        # flag == 1 for every real GT box; device follows the targets.
        gt_boxes_flag = [torch.ones_like(t['labels']) for t in self.targets]
        gt_boxes_index = [torch.nonzero(t) for t in gt_boxes_flag]
        boxes_number_per_img = [int(flag.sum()) for flag in gt_boxes_flag]
        padding_index_per_image = self.get_padding_per_image(boxes_number_per_img)
        concat_labels, concat_boxes, concat_image_index, concat_labels_index = \
            self._concat_targets(gt_boxes_flag=gt_boxes_flag)
        (scalar_concat_labels_index, scalar_concat_labels, scalar_concat_image_index,
         scalar_concat_boxes, noise_labels, noise_boxes) = self._scalar_targets(
            concat_labels_index, concat_labels, concat_image_index, concat_boxes)
        if self.label_noise_scale > 0:
            noise_labels = self._add_noise_to_label(noise_labels)
        if self.box_noise_scale > 0:
            noise_boxes = self._add_noise_to_boxes(noise_boxes)
        input_query_label = self._build_input_query_label(
            boxes_number_per_img, scalar_concat_image_index, noise_labels, padding_index_per_image)
        input_query_bbox = self._build_input_query_bbox(
            boxes_number_per_img, scalar_concat_image_index, noise_boxes, padding_index_per_image)
        single_pad = int(max(boxes_number_per_img))
        pad_size = int(single_pad * self.scalar)
        attention_mask = self._build_attention_mask(pad_size)
        mask_dict = {
            'scalar_concat_labels_index': torch.as_tensor(scalar_concat_labels_index).long(),
            'boxes_number_per_img': torch.as_tensor(boxes_number_per_img).long(),
            'padding_index_per_image': torch.as_tensor(padding_index_per_image).long(),
            'label_boxes_scalar': (scalar_concat_labels, scalar_concat_boxes),
            'gt_boxes_index': gt_boxes_index,
            'pad_size': pad_size
        }
        return input_query_label, input_query_bbox, attention_mask, mask_dict

    def _build_attention_mask(self, pad_size):
        """Boolean mask (True = attention blocked) of shape [pad+Q, pad+Q].

        Matching queries must not attend to any denoising query, and each
        denoising group must not attend to the other groups.

        Args:
            pad_size: total number of denoising slots (single_pad * scalar).
        """
        single_pad = pad_size // self.scalar
        tgt_size = pad_size + self.num_queries * self.num_patterns  # matches input_query_label dim 0
        attn_mask = torch.zeros(tgt_size, tgt_size, dtype=torch.bool)
        attn_mask[pad_size:, :pad_size] = True  # matching part cannot see the GT noise
        for group in range(self.scalar):
            lo, hi = single_pad * group, single_pad * (group + 1)
            attn_mask[lo:hi, hi:pad_size] = True  # block later groups (empty slice for the last)
            attn_mask[lo:hi, :lo] = True          # block earlier groups (empty slice for the first)
        return attn_mask

    def _add_noise_to_boxes(self, noise_boxes):
        """Jitter (cx, cy, w, h) boxes proportionally to their size.

        The center may move by up to +-w/2, +-h/2 and the size may change by
        up to +-w, +-h, all scaled by ``box_noise_scale``; the result is
        clamped back to [0, 1].
        """
        diff = torch.zeros_like(noise_boxes)
        diff[:, :2] = noise_boxes[:, 2:] / 2  # w/2, h/2
        diff[:, 2:] = noise_boxes[:, 2:]      # w, h
        noise_boxes = noise_boxes + torch.mul(torch.rand_like(noise_boxes) * 2 - 1.0, diff) * self.box_noise_scale
        return noise_boxes.clamp(min=0.0, max=1.0)

    def _build_input_query_label(self, boxes_number_per_img, scalar_concat_image_index,
                                 noise_labels, padding_index_per_image):
        """Label queries: [pad_size noisy-GT slots | matching slots] per image.

        The last embedding channel is an indicator: 1 for denoising slots,
        0 for matching slots.

        Args:
            boxes_number_per_img: GT box count per image.
            scalar_concat_image_index: image index of every noisy GT entry.
            noise_labels: noisy labels to embed.
            padding_index_per_image: slot of every noisy GT entry (see
                get_padding_per_image).
        """
        num_tgt = self.num_queries * self.num_patterns
        non_object = self.label_enc(torch.tensor(self.num_classes, device=self.device))
        embedding_query = non_object.repeat(num_tgt, 1)
        indicator0 = torch.zeros([num_tgt, 1], device=self.device)  # 0 = matching query
        label_query = torch.cat([embedding_query, indicator0], dim=1)
        pad_size = int(max(boxes_number_per_img)) * self.scalar
        label_padding = torch.zeros(pad_size, self.hidden_dim, device=self.device)
        input_query_label = torch.cat([label_padding, label_query], dim=0).repeat(self.batch_size, 1, 1)
        if len(scalar_concat_image_index) > 0:
            input_label_embedding = self.label_enc(noise_labels.long())
            indicator1 = torch.ones([input_label_embedding.shape[0], 1], device=self.device)  # 1 = denoise query
            input_label_embedding = torch.cat([input_label_embedding, indicator1], dim=1)
            # Scatter each noisy GT embedding into its (image, slot) position.
            input_query_label[(scalar_concat_image_index.long(), padding_index_per_image)] = input_label_embedding
        return input_query_label

    def _build_input_query_bbox(self, boxes_number_per_img, scalar_concat_image_index,
                                noise_boxes, padding_index_per_image):
        """Box queries: [pad_size noisy-GT boxes (logit space) | learned box queries].

        Args:
            boxes_number_per_img: GT box count per image.
            scalar_concat_image_index: image index of every noisy GT entry.
            noise_boxes: noisy boxes in [0, 1] cxcywh.
            padding_index_per_image: slot of every noisy GT entry.
        """
        pad_size = int(max(boxes_number_per_img)) * self.scalar
        box_embedding_weight_repeat = self.embedding_weight.repeat(self.num_patterns, 1)
        padding_bbox = torch.zeros(pad_size, self.box_embedding_dim, device=self.device)
        input_query_bbox = torch.cat([padding_bbox, box_embedding_weight_repeat], dim=0).repeat(self.batch_size, 1, 1)
        if len(scalar_concat_image_index) > 0:
            # Un-sigmoid so a downstream sigmoid over reference points recovers the boxes.
            input_bbox_embed = self.inverse_sigmoid(noise_boxes)
            input_query_bbox[(scalar_concat_image_index.long(), padding_index_per_image)] = input_bbox_embed
        return input_query_bbox
