from .center_net import CenterNetTargetGenerator
from .bbox import get_affine_transform, affine_transform
from .data_aug import _distort, _crop, _pad_to_square, _flip, Data_anchor_sample
from .augmentations import preprocess_face
import numpy as np
import cv2
import time


class CenterNetDefaultTrainTransform(object):
    """Training-time transform for CenterNet.

    Augments the input face image, warps it (and its boxes) into the
    network input / output grids with an affine transform, normalizes the
    pixels, and produces the dense training targets (heatmap, wh, offset)
    via ``CenterNetTargetGenerator``.
    """

    def __init__(self, width, height, num_class=1, scale_factor=4, **kwargs):
        self._kwargs = kwargs
        # Only square inputs are supported by this transform.
        assert width == height
        self._width = width
        self._height = height
        self._num_class = num_class
        self._scale_factor = scale_factor
        # Upper bound on objects per image (currently unused here; the
        # subsampling code that used it is disabled).
        self._max_objects = 32
        # Targets are generated on the stride-reduced output grid.
        self._target_generator = CenterNetTargetGenerator(
            num_class, width // scale_factor, height // scale_factor)

    def __call__(self, src, targets, img_path):
        """Apply transform to training image/label."""
        img = src.copy()
        bboxes = targets[:, :4].copy()
        cls_ids = targets[:, -1:].copy()
        in_h, in_w = self._height, self._width

        # Photometric / geometric face augmentation, then back to BGR for cv2.
        img, bboxes, cls_ids = preprocess_face(
            img, bboxes, cls_ids, 'train', self._width, self._height)
        img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)

        # Affine warp from the (possibly non-square) augmented image onto
        # the square network input, centered and scaled by the longer side.
        h, w = img.shape[:2]
        scale = max(h, w) * 1.0
        center = np.array([w / 2., h / 2.], dtype=np.float32)
        m_input = get_affine_transform(center, scale, 0, [in_w, in_h])
        img = cv2.warpAffine(img, m_input, (in_w, in_h), flags=cv2.INTER_LINEAR)

        # Boxes are mapped straight onto the stride-reduced output grid.
        out_w = in_w // self._scale_factor
        out_h = in_h // self._scale_factor
        m_output = get_affine_transform(center, scale, 0, [out_w, out_h])
        for idx in range(bboxes.shape[0]):
            bboxes[idx, :2] = affine_transform(bboxes[idx, :2], m_output)
            bboxes[idx, 2:4] = affine_transform(bboxes[idx, 2:4], m_output)

        # Keep only boxes whose center lies strictly inside the output grid
        # (with a 1-pixel margin on every side).
        centers = (bboxes[:, :2] + bboxes[:, 2:]) / 2
        keep = np.logical_and(
            np.array([1, 1]) < centers,
            centers < np.array([out_w - 1, out_h - 1])).all(axis=1)
        bboxes = bboxes[keep].copy()
        cls_ids = cls_ids[keep].copy()

        # Clamp surviving corners onto the grid: x to width, y to height.
        bboxes[:, ::2] = np.clip(bboxes[:, ::2], 0, out_w - 1)
        bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, out_h - 1)

        # BGR -> RGB, normalize to roughly [-1, 1], then CHW float32.
        tensor = img[:, :, ::-1].astype(np.float32)
        tensor = (tensor - 127.5) / 128.0
        tensor = tensor.transpose(2, 0, 1).astype(np.float32)

        # Build dense targets here so CPU dataloader workers offload the GPU.
        heatmap, wh_target, wh_mask, center_reg, center_reg_mask = \
            self._target_generator(bboxes, cls_ids)

        return {
            'img': tensor,
            'heatmap': heatmap,
            'wh_target': wh_target,
            'wh_mask': wh_mask,
            'center_reg': center_reg,
            'center_mask': center_reg_mask,
        }



class CenterNetDefaultValTransform(object):
    """Validation-time transform for CenterNet.

    Warps the image onto the network input with an affine transform,
    maps the ground-truth boxes the same way (no stride reduction at
    validation time), and normalizes pixels with ImageNet mean/std.
    """

    def __init__(self, width, height):
        self._width = width
        self._height = height
        # ImageNet channel statistics, applied after scaling to [0, 1].
        self._mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
        self._std = np.array([0.229, 0.224, 0.225], dtype=np.float32)

    def __call__(self, src, tragets):
        """Apply transform to validation image/label.

        NOTE(review): the parameter name ``tragets`` is a typo for
        ``targets``; it is kept as-is so keyword callers do not break.
        """
        # Copy so the per-row affine writes below do not mutate the
        # caller's array in place (the original took a view — bug fix,
        # and it matches the train transform's behavior).
        bbox = tragets[:, :4].copy()
        gt_mask = tragets[:, 4:].copy()
        input_h, input_w = self._height, self._width
        h, w, _ = src.shape
        s = max(h, w) * 1.0
        c = np.array([w / 2., h / 2.], dtype=np.float32)
        trans_input = get_affine_transform(c, s, 0, [input_w, input_h])
        # warpAffine reads src without modifying it, so no copy is needed.
        inp = cv2.warpAffine(src, trans_input, (input_w, input_h),
                             flags=cv2.INTER_LINEAR)
        # Boxes stay at input resolution during validation.
        output_w = input_w
        output_h = input_h
        trans_output = get_affine_transform(c, s, 0, [output_w, output_h])
        for i in range(bbox.shape[0]):
            bbox[i, :2] = affine_transform(bbox[i, :2], trans_output)
            bbox[i, 2:4] = affine_transform(bbox[i, 2:4], trans_output)
        # Bug fix: clip x coordinates (columns 0, 2) by width and y
        # coordinates (columns 1, 3) by height. The original clipped
        # (x1, y1) by width and (x2, y2) by height, which is wrong for
        # non-square inputs; identical for square ones.
        bbox[:, ::2] = np.clip(bbox[:, ::2], 0, output_w - 1)
        bbox[:, 1::2] = np.clip(bbox[:, 1::2], 0, output_h - 1)

        # To tensor: [0, 1] scaling, ImageNet normalization, CHW float32.
        img = inp.astype(np.float32) / 255.
        img = (img - self._mean) / self._std
        img = img.transpose(2, 0, 1).astype(np.float32)

        ret = {
            'img': img,
            'gt_bbox': bbox,
            'gt_list': gt_mask,
        }
        return ret



def _get_border(border, size):
    """Get the border size of the image"""
    i = 1
    while size - border // i <= border // i:
        i *= 2
    return border // i