import glob
import random
import os
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
from PIL import Image
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# from skimage.transform import resize
import sys
import cv2
from .augmentations import *

import imgaug as ia 
from imgaug import augmenters as iaa 
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
import random 

class ListDataset(Dataset):
    """YOLO-style list dataset.

    `list_path` is a text file with one image path per line; each matching
    label path is derived by swapping 'images' -> 'labels' and the image
    extension -> '.txt'.  Label files hold rows of [class, cx, cy, w, h]
    (normalized — assumed from the cx/cy +- w/2 conversion below).

    __getitem__ returns (CxHxW image tensor, target) where target is an
    (N, 5) array [x1, y1, x2, y2, class], or None when no label file exists.
    """

    def __init__(self, list_path, transform=None):
        with open(list_path, 'r') as file:
            self.img_files = file.readlines()
        self.label_files = [
            path.replace('images', 'labels').replace('.png', '.txt').replace('.jpg', '.txt')
            for path in self.img_files
        ]
        # Lazy default: the old `transform=Yolov3Augmentation()` default was
        # built once at import time and shared by every dataset instance.
        self.transform = Yolov3Augmentation() if transform is None else transform

    def __getitem__(self, index):
        img_path = self.img_files[index % len(self.img_files)].rstrip()
        img = np.array(Image.open(img_path))

        # Skip samples whose array is not 3-dimensional (e.g. grayscale).
        while len(img.shape) != 3:
            index += 1
            img_path = self.img_files[index % len(self.img_files)].rstrip()
            img = np.array(Image.open(img_path))

        label_path = self.label_files[index % len(self.img_files)].rstrip()
        labels = None
        if os.path.exists(label_path):
            labels = np.loadtxt(label_path).reshape(-1, 5)

        # Default to the raw image so `image` is always bound (the old code
        # raised UnboundLocalError when labels or transform were absent).
        image = img
        target = None
        if labels is not None:
            label = labels[:, 0]
            bboxes = labels[:, 1:]
            # cx, cy, w, h  ->  x1, y1, x2, y2
            x1 = (bboxes[:, 0] - bboxes[:, 2] / 2)
            y1 = (bboxes[:, 1] - bboxes[:, 3] / 2)
            x2 = (bboxes[:, 0] + bboxes[:, 2] / 2)
            y2 = (bboxes[:, 1] + bboxes[:, 3] / 2)
            bboxes = np.vstack((x1, y1, x2, y2)).transpose()

            if self.transform:
                image, bboxes, label = self.transform(img.copy(), bboxes.copy(), label.copy())

            # Stack the (N,) class vector, not the raw (N, 5) label matrix
            # (the old code hstacked `labels`, breaking the no-transform path).
            target = np.hstack((bboxes, np.expand_dims(label, axis=1)))

        return torch.from_numpy(image).permute(2, 0, 1), target

    def __len__(self):
        return len(self.img_files)


class MyDataset(Dataset):
    """Detection dataset over lines of `image_path|num|x|y|w|h|label|...`.

    Box coordinates in the list file are normalized to [0, 1] (per the
    original `norm value` note); x, y is the top-left corner.  __getitem__
    returns (CxHxW float tensor scaled to [-1, 1], target) where target is
    an (N, 6) tensor laid out as [sample_idx, label, cx, cy, w, h]; the
    sample_idx column is filled in later by `collate_fn`.
    """

    def __init__(self, root_path, file_list, transform=None, img_dim=256):
        super(MyDataset, self).__init__()
        self.root_path = root_path
        with open(file_list, "r") as f:
            self.samples = f.readlines()
        self.transform = transform
        self.img_dim = img_dim
        self.index = 0  # counter kept for ad-hoc debug dumps

        # Photometric-only augmentation (no geometry, so boxes are untouched):
        # occasional blur, contrast jitter, gaussian noise, brightness jitter.
        self.seq_image = iaa.Sequential([
            iaa.Sometimes(
                0.5,
                iaa.OneOf([
                    iaa.GaussianBlur(sigma=(0, 0.5)),
                    iaa.MedianBlur(k=(1, 7)),
                ])
            ),
            iaa.LinearContrast((0.75, 1.5)),
            # Per-channel noise half of the time (can shift color, not only
            # brightness).
            iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5),
            iaa.Multiply((0.8, 1.2), per_channel=0.2),
        ], random_order=True)

        # Heavier photometric pipeline: 0-5 of the listed augmenters per
        # image.  NOTE(review): built here but not referenced by __getitem__.
        self.seq_image_heavy = iaa.Sequential(
            [
                iaa.SomeOf((0, 5),
                    [
                        iaa.Sometimes(0.5,
                            iaa.Superpixels(
                                p_replace=(0, 1.0),
                                n_segments=(20, 200)
                            )
                        ),
                        iaa.OneOf([
                            iaa.GaussianBlur((0, 3.0)),
                            iaa.AverageBlur(k=(2, 7)),
                            iaa.MedianBlur(k=(3, 11)),
                        ]),
                        iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)),
                        iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)),
                        iaa.Sometimes(0.5, iaa.OneOf([
                            iaa.EdgeDetect(alpha=(0, 0.7)),
                            iaa.DirectedEdgeDetect(
                                alpha=(0, 0.7), direction=(0.0, 1.0)
                            ),
                        ])),
                        iaa.AdditiveGaussianNoise(
                            loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5
                        ),
                        iaa.OneOf([
                            iaa.Dropout((0.01, 0.1), per_channel=0.5),
                            iaa.CoarseDropout(
                                (0.03, 0.15), size_percent=(0.02, 0.05),
                                per_channel=0.2
                            ),
                        ]),
                        iaa.Invert(0.05, per_channel=True),
                        iaa.Add((-10, 10), per_channel=0.5),
                        iaa.Multiply((0.5, 1.5), per_channel=0.5),
                        iaa.LinearContrast((0.5, 2.0), per_channel=0.5),
                        iaa.Grayscale(alpha=(0.0, 1.0)),
                    ],
                    random_order=True
                )
            ],
            random_order=True
        )

        # Geometric augmentation applied to image AND bounding boxes together.
        self.seq_bbox = iaa.Sequential([
            iaa.OneOf([
                iaa.Fliplr(0.5),
                iaa.Flipud(0.5),
            ]),
            iaa.Affine(translate_px={"x": (-40, 40), "y": (-40, 40)})
        ])

    def __len__(self):
        return len(self.samples)

    def collate_fn(self, batch):
        """Collate (img, target) pairs: stack images, stamp each target's
        column 0 with its sample index, and concatenate targets to (M, 6)."""
        imgs, targets = list(zip(*batch))
        # Remove empty placeholder targets
        targets = [boxes for boxes in targets if boxes is not None]
        # Column 0 records which image in the batch each box belongs to
        for i, boxes in enumerate(targets):
            boxes[:, 0] = i
        targets = torch.cat(targets, 0)
        imgs = torch.stack(imgs)
        return imgs, targets

    def _load_image(self, index):
        """Load and resize the image for `index`, skipping unreadable files.

        Returns (HxWx3 uint8 image resized to img_dim, split sample fields).
        Raises RuntimeError if no sample in the list can be read.
        """
        idx = index
        for _ in range(len(self.samples)):
            sample = self.samples[idx].strip().split('|')
            img_path = os.path.join(self.root_path, sample[0])
            try:
                img = np.array(Image.open(img_path))
                break
            except OSError:
                # Unreadable/missing image: advance to the next sample.  The
                # old code tested `img is None`, which never triggered because
                # Image.open raises instead of returning None.
                idx = (idx + 1) % len(self.samples)
        else:
            raise RuntimeError("no readable image found in sample list")

        # Fixed: was `img.shape != 3` (tuple vs int, always true), which sent
        # 3-channel images into a GRAY2BGR conversion.
        if len(img.shape) != 3:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        img = cv2.resize(img, (self.img_dim, self.img_dim))
        return img, sample

    @staticmethod
    def _parse_boxes(sample):
        """Parse `num` boxes from a split sample line.

        Returns ((N, 4) float x1,y1,x2,y2 array, (N,) int label array); the
        box array keeps shape (0, 4) for empty lines so downstream column
        slicing still works.
        """
        num = int(sample[1])
        boxes = []
        labels = []
        for i in range(num):
            x = float(sample[i * 5 + 2])
            y = float(sample[i * 5 + 3])
            w = float(sample[i * 5 + 4])
            h = float(sample[i * 5 + 5])
            boxes.append([x, y, x + w, y + h])
            labels.append(int(sample[i * 5 + 6]))
        return (np.array(boxes, dtype=np.float64).reshape(-1, 4),
                np.array(labels, dtype=np.int64))

    def _augment(self, img, bbox, label):
        """Apply photometric (10% of samples) then geometric augmentation.

        Returns (augmented image, normalized (N, 4) x1y1x2y2 boxes, labels).
        """
        # Photometric augmentation on the image only.
        if random.random() > 0.9:
            img = self.seq_image(images=[img])[0]

        # imgaug works in pixel coordinates, so scale the normalized boxes up.
        h, w = img.shape[0], img.shape[1]
        iaa_boxes = [
            BoundingBox(x1=b[0] * w, y1=b[1] * h, x2=b[2] * w, y2=b[3] * h, label=l)
            for b, l in zip(bbox, label)
        ]
        boxes_on_image = BoundingBoxesOnImage(iaa_boxes, shape=img.shape)
        img_aug, bbs_aug = self.seq_bbox(image=img, bounding_boxes=boxes_on_image)

        # ...and normalize back to [0, 1] against the augmented image size.
        h, w = img_aug.shape[0], img_aug.shape[1]
        out_boxes = []
        out_labels = []
        for bb in bbs_aug:
            out_boxes.append([bb.x1 / w, bb.y1 / h, bb.x2 / w, bb.y2 / h])
            out_labels.append(int(bb.label))
        return (img_aug,
                np.array(out_boxes, dtype=np.float64).reshape(-1, 4),
                np.array(out_labels, dtype=np.int64))

    def __getitem__(self, index):
        img, sample = self._load_image(index)
        bbox, label = self._parse_boxes(sample)

        if self.transform:
            image, bboxes, labels = self._augment(img, bbox, label)
        else:
            image, bboxes, labels = img, bbox, label

        # Scale pixels to [-1, 1].
        image = image.astype(np.float32)
        image -= 127.5
        image /= 127.5

        # Clip boxes that geometric augmentation pushed outside the image.
        bboxes[bboxes < 0] = 0
        bboxes[bboxes > 1.0] = 1.0

        # x1, y1, x2, y2  ->  cx, cy, w, h
        cx = (bboxes[:, 2] + bboxes[:, 0]) / 2.0
        cy = (bboxes[:, 3] + bboxes[:, 1]) / 2.0
        w = bboxes[:, 2] - bboxes[:, 0]
        h = bboxes[:, 3] - bboxes[:, 1]
        bboxes[:, 0] = cx
        bboxes[:, 1] = cy
        bboxes[:, 2] = w
        bboxes[:, 3] = h

        # Target layout: [sample_idx (filled by collate_fn), label, cx, cy, w, h]
        target = torch.zeros((bboxes.shape[0], 6))
        target[:, 2:] = torch.from_numpy(bboxes)
        target[:, 1] = torch.from_numpy(labels)

        return torch.from_numpy(image).permute(2, 0, 1), target
