#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.


from typing import Any, Callable, Dict, Optional, Union

from classy_vision.dataset import ClassyDataset, register_dataset

import os
import glob
import random
import numpy as np
import cv2
import imgaug.augmenters as iaa
import torchvision.transforms as transforms
from torchvision.datasets.folder import is_image_file


class _ReweightedDataset2(object):
    def __init__(self, res, img_dir, sources, weights, split):
        self.res = tuple(res)

        self.num_samples = []
        self.paths = []
        for src in sources:
            paths = sorted([x for x in glob.glob(os.path.join(img_dir, src, '**'), recursive=True) if is_image_file(x)])
            self.num_samples.append(len(paths))
            self.paths += paths
        self.weights = weights
        self.split = split

        print('Loading dataset:', split, sources, self.num_samples)

    def __getitem__(self, index):
        if self.split == 'train':
            cumsum = np.cumsum(self.weights)
            which = np.sum(cumsum/cumsum[-1] < random.random())
            index = sum(self.num_samples[:which]) + random.randint(0, self.num_samples[which]-1)

        img = cv2.imread(self.paths[index])
        cls_id = int(self.paths[index].split('/')[-2])

        if self.split == 'train':
            img = self.aug(img)
        img = self.transform(img)

        return {'input': img, 'target': cls_id}

    def __len__(self):
        return 1000 if self.split == 'train' else sum(self.num_samples)

    def transform(self, img):
        op = transforms.Compose([
            lambda x: cv2.resize(x, self.res, interpolation=cv2.INTER_AREA),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
        ])
        return op(img)

    def aug(self, img):
        op = iaa.Sequential([
            iaa.PerspectiveTransform(scale=(0, 0.1), keep_size=False),
            iaa.Crop(percent=(0, 0.05), keep_size=False),
            iaa.Sometimes(
                0.5,
                iaa.GaussianBlur(sigma=(0, 0.5))
            ),
            iaa.LinearContrast((0.75, 1.5)),
            iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5),
            iaa.Multiply((0.8, 1.2), per_channel=0.2),
        ], random_order=True)
        return op(image=img)


@register_dataset("reweighted_dataset2")
class ReweightedDataset2(ClassyDataset):
    def __init__(
        self,
        batchsize_per_replica: int,
        shuffle: bool,
        num_samples: int,
        res,
        img_dir,
        sources,
        weights,
        split
    ) -> None:
        dataset = _ReweightedDataset2(res, img_dir, sources, weights, split)
        super().__init__(
            dataset, batchsize_per_replica, shuffle, None, num_samples
        )

    @classmethod
    def from_config(cls, config: Dict[str, Any]) -> "ReweightedDataset2":
        return cls(
            config.get('batchsize_per_replica'),
            config.get('use_shuffle'),
            config.get('num_samples'),
            config.get('res'),
            config.get('img_dir'),
            config.get('sources'),
            config.get('weights'),
            config.get('split'),
        )


if __name__ == '__main__':
    # Manual smoke test: build the raw dataset and display each sample
    # with OpenCV; press Esc to stop.
    # (Full configuration would be sources "1".."9" with uniform weights.)
    ds = _ReweightedDataset2(
        res=(224, 224),
        img_dir='/home/lixuan/workspace/dataset/gz',
        sources=["3"],
        weights=[1.0],
        split='train',
    )

    for idx in range(len(ds)):
        sample = ds[idx]
        tensor, label = sample['input'], sample['target']
        # Undo the dataset transform for display: CHW float in [-1, 1]
        # back to HWC uint8 in [0, 255].
        frame = tensor.numpy().transpose(1, 2, 0)
        frame = ((frame + 1) * 127.5).astype(np.uint8)

        cv2.imshow('img', frame)
        if cv2.waitKey() == 27:  # 27 == Esc
            break