import numpy as np
import torch
import cv2
from dataloader.transforms import *


class TrainAugmentation:
    def __init__(self, size, mean=127.5, std=128.0):
        """Training-time image preprocessing pipeline.

        Args:
            size: target size the image is resized to.
            mean: mean pixel value subtracted from every channel.
            std: divisor applied to the image after mean subtraction.
        """
        self.mean = mean
        self.size = size
        self.std = std
        self.augment = Compose([
            ConvertFromInts(),
            # PhotometricDistort(),
            Resize(self.size),
            SubtractMeans(self.mean),
            lambda img: (img / self.std),
            ToTensor(),
        ])

    def __call__(self, img):
        """Apply the augmentation pipeline to a single image.

        Args:
            img: image array as produced by cv2.imread, in RGB layout.

        Returns:
            The preprocessed image (a tensor after ToTensor).
        """
        return self.augment(img)


class TestTransform:
    """Evaluation-time preprocessing: float conversion, resize, normalization, tensor."""

    def __init__(self, size, mean=127.5, std=128.0):
        # Build the step list explicitly before composing, for readability.
        normalize = lambda img: img / std
        steps = [
            ConvertFromInts(),
            Resize(size),
            SubtractMeans(mean),
            normalize,
            ToTensor(),
        ]
        self.transform = Compose(steps)

    def __call__(self, image):
        """Run the preprocessing pipeline on *image* and return the result."""
        return self.transform(image)


class HeilsDataset:
    """Dataset over a pre-built ground-truth image database (imdb).

    Each imdb entry is a dict with keys 'image' (file path), 'label',
    'bbox_target', 'landmark_target' and — for training entries —
    'flipped' (horizontal-flip augmentation flag).
    """

    def __init__(self, gt_imdb, train_transform=None, val_transform=None, is_test=False):
        """
        Args:
            gt_imdb: list of annotation dicts (see class docstring).
            train_transform: callable applied to training images (enables flipping).
            val_transform: callable applied to validation/test images.
            is_test: kept for interface compatibility; currently unused.
        """
        self.imdb = gt_imdb
        self.train_transform = train_transform
        self.val_transform = val_transform

    def __len__(self):
        return len(self.imdb)

    def __getitem__(self, index):
        """Return (image, cls, bbox_target, landmark) for the given index.

        cls, bbox_target and landmark are float32 torch tensors; the image
        is whatever the configured transform produces.
        """
        entry = self.imdb[index]
        image_id = entry['image']
        cls = entry['label']
        bbox_target = entry['bbox_target']
        landmark = entry['landmark_target']

        image = self._read_image(image_id)

        if self.train_transform:
            if entry['flipped']:
                # Horizontal flip. ascontiguousarray removes the negative
                # stride that the ::-1 slice produces, which would otherwise
                # make torch.from_numpy (inside ToTensor) raise ValueError.
                image = np.ascontiguousarray(image[:, ::-1, :])
            image = self.train_transform(image)
        # NOTE(review): if both transforms are set, both are applied in
        # sequence — callers are expected to pass exactly one.
        if self.val_transform:
            image = self.val_transform(image)

        bbox_target = torch.from_numpy(bbox_target.astype(np.float32))
        cls = torch.from_numpy(np.array([cls], np.float32))
        landmark = torch.from_numpy(landmark.astype(np.float32))

        return image, cls, bbox_target, landmark

    def _read_image(self, image_file):
        """Read an image from disk and convert BGR -> RGB.

        Raises:
            FileNotFoundError: if cv2.imread cannot read the file (it
            returns None instead of raising, which would otherwise fail
            later with a cryptic cvtColor error).
        """
        image = cv2.imread(str(image_file))
        if image is None:
            raise FileNotFoundError(f"cannot read image: {image_file}")
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        return image



