import numpy as np
import torch
import scipy.misc
from torchsample.transforms import SpecialCrop, Pad
from torchvision import transforms
import torch.nn.functional as F
import easydict

def bbox_xy2cs(box):
    """Convert a bounding box from corner form (x, y, w, h) to
    center/scale form.

    NOTE(review): unimplemented stub — currently returns None.
    """
    pass

# Global runtime configuration (attribute-style dict).
config = easydict.EasyDict()
config.F_REGION_SCALE = 2.0  # factor by which each bbox is enlarged to form its region
config.I_REID_INPUT_WIDTH = 144  # re-id network input width (px)
config.I_REID_INPUT_HEIGHT = 244  # NOTE(review): 244 is an unusual re-id height (256/288 are common) — confirm not a typo

# Crop size fed to the region slicer: the re-id input scaled up by F_REGION_SCALE.
# NOTE: these are floats (2.0 * int); cast to int before passing to PIL.
config.REGION_WIDTH = config.F_REGION_SCALE * config.I_REID_INPUT_WIDTH
config.REGION_HEIGHT = config.F_REGION_SCALE * config.I_REID_INPUT_HEIGHT


class opacity(object):
    """Extract enlarged region crops around tracked bounding boxes and
    resize them for the re-id network.

    Typical use: call ``region_setup`` with a ``{track_id: bbox}`` map,
    then ``gen_region_slices`` with the source PIL image.
    """

    def __get_region(self, bbox, s):
        """Scale ``bbox`` = (x, y, w, h) by factor ``s`` about its center.

        Returns the enlarged box as an (x, y, w, h) tuple; the input
        sequence is not modified.
        """
        x, y, w, h = bbox
        # Shift the top-left corner so the box grows symmetrically.
        x -= (w * s / 2 - w / 2)
        y -= (h * s / 2 - h / 2)
        w *= s
        h *= s
        return (x, y, w, h)

    def region_setup(self, bboxes):
        """Store the raw per-track bounding boxes for later slicing.

        Args:
            bboxes: map of int track id -> box [lt_x, lt_y, w, h].

        The boxes are stored unscaled; ``gen_region_slices`` applies the
        region scale itself.  (The original code looped over the boxes
        calling ``__get_region`` and discarding the result — a no-op loop
        that has been removed.)
        """
        self.bboxes = bboxes

    def gen_region_slices(self, img):
        """Crop and resize one enlarged region per stored bbox.

        Args:
            img: a PIL.Image; crop regions extending past the image edge
                are padded automatically by PIL.

        Returns:
            (ids, imgs): parallel lists of track ids and resized PIL crops.
        """
        # PIL's resize requires integer sizes and crop expects integer
        # coordinates; config.REGION_* are floats (2.0 * int), so cast.
        target_size = (int(config.REGION_WIDTH), int(config.REGION_HEIGHT))
        imgs = []
        ids = []
        for index, bbox in self.bboxes.items():
            rx, ry, rw, rh = self.__get_region(bbox, config.F_REGION_SCALE)
            # crop() pads automatically when the box exceeds the image.
            region = img.crop((int(rx), int(ry), int(rx + rw), int(ry + rh)))
            region = region.resize(target_size)
            # TODO: convert to tensor once the downstream consumer is wired up.
            imgs.append(region)
            ids.append(index)
        return ids, imgs

    def slices_composer(self):
        """Return a torchvision pipeline that resizes a crop to the re-id
        input size and converts it to a tensor.

        Fixes vs. the original: the pipeline is now returned instead of
        discarded; ``transforms.toTensor`` -> ``transforms.ToTensor``; the
        argument-less ``CenterCrop()``/``Pad()`` calls (which raise
        TypeError — both have required arguments) were dropped; ``Resize``
        now receives a plain (h, w) tuple instead of a ``torch.IntTensor``.
        """
        w, h = config.I_REID_INPUT_WIDTH, config.I_REID_INPUT_HEIGHT
        # torchvision size convention is (height, width).
        return transforms.Compose([
            transforms.Resize((h, w)),
            transforms.ToTensor(),
        ])

class PostOpacity(object):
    """Combine feature maps produced from the region slices.

    Placeholder: the combination logic is not implemented yet.
    """

    def __init__(self):
        # Nothing to initialize yet.
        pass

def test():
    """Visual smoke test: slice regions from a demo image and show them."""
    from PIL import Image

    sample = './alphapose/examples/demo/1.jpg'
    image = Image.open(sample)

    extractor = opacity()
    boxes = {
        1: [100, 100, 144, 244]
    }
    extractor.region_setup(boxes)
    _, slices = extractor.gen_region_slices(image)
    for crop in slices:
        crop.show()

if __name__ == '__main__':
    # Run the visual smoke test when executed directly.
    test()