import numpy as np
from torchvision import datasets, transforms
from utils.toolkit import split_images_labels
from torchvision.transforms.functional import InterpolationMode
import os


class iData(object):
    """Base descriptor for an incremental-learning dataset.

    Subclasses override these class attributes (or set instance attributes
    in ``__init__``) and implement ``download_data`` to populate
    ``train_data``/``train_targets``/``test_data``/``test_targets``.
    """

    train_trsf = []   # augmentations applied only to training images
    test_trsf = []    # transforms applied only to evaluation images
    common_trsf = []  # transforms shared by both splits (e.g. ToTensor/Normalize)
    class_order = []  # label order used when slicing classes into tasks


class iCIFAR10(iData):
    """CIFAR-10 at its native 32x32 resolution with crop/flip/jitter augmentation."""

    use_path = False  # samples are in-memory numpy arrays, not file paths

    train_trsf = [
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.ColorJitter(brightness=63 / 255),
    ]
    test_trsf = []
    common_trsf = [
        transforms.ToTensor(),
        # Per-channel CIFAR-10 statistics.
        transforms.Normalize(
            mean=(0.4914, 0.4822, 0.4465), std=(0.2023, 0.1994, 0.2010)
        ),
    ]

    class_order = list(range(10))

    def download_data(self):
        """Fetch CIFAR-10 (downloading if needed) and expose numpy arrays."""
        root = "~/workspace/datasets/"
        train_set = datasets.cifar.CIFAR10(root, train=True, download=True)
        test_set = datasets.cifar.CIFAR10(root, train=False, download=True)
        self.train_data = train_set.data
        self.train_targets = np.array(train_set.targets)
        self.test_data = test_set.data
        self.test_targets = np.array(test_set.targets)


class iCIFAR100(iData):
    """CIFAR-100 at its native 32x32 resolution with crop/flip/jitter augmentation."""

    use_path = False  # samples are in-memory numpy arrays, not file paths

    train_trsf = [
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=63 / 255),
    ]
    test_trsf = []
    common_trsf = [
        transforms.ToTensor(),
        # Per-channel CIFAR-100 statistics.
        transforms.Normalize(
            mean=(0.5071, 0.4867, 0.4408), std=(0.2675, 0.2565, 0.2761)
        ),
    ]

    class_order = list(range(100))

    def download_data(self):
        """Fetch CIFAR-100 (downloading if needed) and expose numpy arrays."""
        root = "~/workspace/datasets/"
        train_set = datasets.cifar.CIFAR100(root, train=True, download=True)
        test_set = datasets.cifar.CIFAR100(root, train=False, download=True)
        self.train_data = train_set.data
        self.train_targets = np.array(train_set.targets)
        self.test_data = test_set.data
        self.test_targets = np.array(test_set.targets)


class iCIFAR100_224(iCIFAR100):
    """CIFAR-100 upsampled to 224x224 (e.g. for ImageNet-pretrained backbones).

    Inherits normalization stats, class order and ``download_data`` from
    ``iCIFAR100``; only the geometric transforms differ.
    """

    train_trsf = [
        transforms.RandomResizedCrop(
            size=224, interpolation=InterpolationMode.BICUBIC
        ),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.ColorJitter(brightness=63 / 255),
    ]
    test_trsf = [
        transforms.Resize(size=256, interpolation=InterpolationMode.BICUBIC),
        transforms.CenterCrop(size=224),
    ]


def build_transform(is_train, input_size=224):
    """Build a list of torchvision transforms (DeiT-style recipe).

    Args:
        is_train: if True, return the augmented training pipeline;
            otherwise the deterministic evaluation pipeline.
        input_size: final square crop size in pixels.

    Returns:
        A list of transforms ending in ``ToTensor`` (no normalization).
    """
    if is_train:
        # Aggressive random crop over a very wide scale range, plus flip.
        return [
            transforms.RandomResizedCrop(
                input_size, scale=(0.05, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)
            ),
            transforms.RandomHorizontalFlip(p=0.5),
            transforms.ToTensor(),
        ]

    eval_trsf = []
    if input_size > 32:
        # Resize keeping the same 256->224 ratio the crop size implies.
        resize_target = int((256 / 224) * input_size)
        eval_trsf.append(
            transforms.Resize(resize_target, interpolation=InterpolationMode.BICUBIC)
        )
        eval_trsf.append(transforms.CenterCrop(input_size))
    eval_trsf.append(transforms.ToTensor())
    return eval_trsf


class iCIFAR224(iData):
    """CIFAR-100 resized to 224x224 via ``build_transform`` pipelines."""

    use_path = False  # samples are in-memory numpy arrays, not file paths

    def __init__(self, args):
        super().__init__()
        self.args = args
        # CAM visualisation runs at a much larger input resolution.
        input_size = 1024 if args.cam_visual else 224
        self.train_trsf = build_transform(True, input_size)
        self.test_trsf = build_transform(False, input_size)
        self.common_trsf = []
        self.class_order = list(range(100))

    def download_data(self):
        """Fetch CIFAR-100 (downloading if needed) and expose numpy arrays."""
        root = "~/workspace/datasets/"
        train_set = datasets.cifar.CIFAR100(root, train=True, download=True)
        test_set = datasets.cifar.CIFAR100(root, train=False, download=True)
        self.train_data = train_set.data
        self.train_targets = np.array(train_set.targets)
        self.test_data = test_set.data
        self.test_targets = np.array(test_set.targets)


class iImageNet1000(iData):
    """ImageNet-1k (ILSVRC-2012); the dataset folder must be configured first.

    ``download_data`` deliberately fails until the placeholder paths below
    are replaced with the actual dataset location.
    """

    use_path = True  # samples are file paths loaded lazily by the data loader
    train_trsf = [
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=63 / 255),
    ]
    test_trsf = [
        transforms.Resize(256),
        transforms.CenterCrop(224),
    ]
    common_trsf = [
        transforms.ToTensor(),
        # Standard ImageNet normalization statistics.
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ]

    class_order = np.arange(1000).tolist()

    def download_data(self):
        """Load train/val splits from an ImageFolder layout.

        Raises:
            NotImplementedError: always, until the dataset paths are filled in.
        """
        # `assert 0` would be stripped under `python -O`, silently letting the
        # placeholder paths through -- raise explicitly instead.
        raise NotImplementedError("You should specify the folder of your dataset")
        # Template kept for reference: fill in real paths and remove the raise.
        train_dir = "[DATA-PATH]/train/"
        test_dir = "[DATA-PATH]/val/"

        train_dset = datasets.ImageFolder(train_dir)
        test_dset = datasets.ImageFolder(test_dir)

        self.train_data, self.train_targets = split_images_labels(train_dset.imgs)
        self.test_data, self.test_targets = split_images_labels(test_dset.imgs)


class iImageNet100(iData):
    """100-class ImageNet subset; the dataset folder must be configured first.

    ``download_data`` deliberately fails until the placeholder paths below
    are replaced with the actual dataset location.
    """

    use_path = True  # samples are file paths loaded lazily by the data loader
    train_trsf = [
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
    ]
    test_trsf = [
        transforms.Resize(256),
        transforms.CenterCrop(224),
    ]
    common_trsf = [
        transforms.ToTensor(),
        # Standard ImageNet normalization statistics.
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ]

    # NOTE(review): 1000 entries for a 100-class subset -- looks copied from
    # iImageNet1000. Harmless if only the first 100 ids are consumed, but
    # confirm against the trainer before changing.
    class_order = np.arange(1000).tolist()

    def download_data(self):
        """Load train/val splits from an ImageFolder layout.

        Raises:
            NotImplementedError: always, until the dataset paths are filled in.
        """
        # `assert 0` would be stripped under `python -O`, silently letting the
        # placeholder paths through -- raise explicitly instead.
        raise NotImplementedError("You should specify the folder of your dataset")
        # Template kept for reference: fill in real paths and remove the raise.
        train_dir = "[DATA-PATH]/train/"
        test_dir = "[DATA-PATH]/val/"

        train_dset = datasets.ImageFolder(train_dir)
        test_dset = datasets.ImageFolder(test_dir)

        self.train_data, self.train_targets = split_images_labels(train_dset.imgs)
        self.test_data, self.test_targets = split_images_labels(test_dset.imgs)


class iImageNetR(iData):
    """ImageNet-R (200 classes) read from an on-disk ImageFolder layout."""

    use_path = True  # samples are file paths loaded lazily by the data loader

    def __init__(self, args):
        super().__init__()
        self.args = args
        # CAM visualisation runs at a much larger input resolution.
        input_size = 1024 if args.cam_visual else 224
        self.train_trsf = build_transform(True, input_size)
        self.test_trsf = build_transform(False, input_size)
        self.common_trsf = []
        self.class_order = list(range(200))

    def download_data(self):
        """Collect (path, label) pairs from the train/test ImageFolder trees."""
        base = "~/workspace/datasets/imagenet-r/"
        train_dset = datasets.ImageFolder(base + "train/")
        test_dset = datasets.ImageFolder(base + "test/")
        self.train_data, self.train_targets = split_images_labels(train_dset.imgs)
        self.test_data, self.test_targets = split_images_labels(test_dset.imgs)


class iImageNetA(iData):
    """ImageNet-A (200 classes) read from an on-disk ImageFolder layout."""

    use_path = True  # samples are file paths loaded lazily by the data loader

    def __init__(self, args):
        super().__init__()
        self.args = args
        # CAM visualisation runs at a much larger input resolution.
        input_size = 1024 if args.cam_visual else 224
        self.train_trsf = build_transform(True, input_size)
        self.test_trsf = build_transform(False, input_size)
        self.common_trsf = []
        self.class_order = list(range(200))

    def download_data(self):
        """Collect (path, label) pairs from the train/test ImageFolder trees."""
        base = "~/workspace/datasets/imagenet-a/"
        train_dset = datasets.ImageFolder(base + "train/")
        test_dset = datasets.ImageFolder(base + "test/")
        self.train_data, self.train_targets = split_images_labels(train_dset.imgs)
        self.test_data, self.test_targets = split_images_labels(test_dset.imgs)


class CUB(iData):
    """CUB-200 birds dataset read from an on-disk ImageFolder layout."""

    use_path = True  # samples are file paths loaded lazily by the data loader

    def __init__(self, args):
        super().__init__()
        self.args = args
        # CAM visualisation runs at a much larger input resolution.
        input_size = 1024 if args.cam_visual else 224
        self.train_trsf = build_transform(True, input_size)
        self.test_trsf = build_transform(False, input_size)
        self.common_trsf = []
        self.class_order = list(range(200))

    def download_data(self):
        """Collect (path, label) pairs from the train/test ImageFolder trees."""
        base = "~/workspace/datasets/cub/"
        train_dset = datasets.ImageFolder(base + "train/")
        test_dset = datasets.ImageFolder(base + "test/")
        self.train_data, self.train_targets = split_images_labels(train_dset.imgs)
        self.test_data, self.test_targets = split_images_labels(test_dset.imgs)


class iDomainNet(iData):
    """DomainNet for domain-incremental learning: six domains x 345 classes.

    Image lists are plain-text files named ``<domain>_<split>.txt`` whose lines
    are ``<relative-path> <label>``; labels are offset by 345 per domain so
    every (domain, class) pair maps to a unique id.
    """

    use_path = True  # samples are file paths loaded lazily by the data loader

    def __init__(self, args):
        super().__init__()
        self.args = args
        # CAM visualisation runs at a much larger input resolution.
        input_size = 224 if not args.cam_visual else 1024
        self.train_trsf = build_transform(True, input_size)
        self.test_trsf = build_transform(False, input_size)
        self.common_trsf = []

        # One block of 345 labels per domain, in domain order.
        self.class_order = np.arange(6 * 345).tolist()
        self.domain_names = [
            "clipart",
            "infograph",
            "painting",
            "quickdraw",
            "real",
            "sketch",
        ]
        self.data_path = os.path.expanduser("~/workspace/datasets/DomainIL/DomainNet")

    def _load_split(self, split):
        """Read every domain's image list for *split* ('train' or 'test').

        Returns:
            (paths, labels): numpy arrays of absolute image paths and the
            domain-offset integer labels.
        """
        paths, labels = [], []
        for task_id, domain in enumerate(self.domain_names):
            list_file = os.path.join(
                self.image_list_root, domain + "_" + split + ".txt"
            )
            # Use a context manager: the original open().readlines() leaked
            # the file handle.
            with open(list_file) as handle:
                for line in handle:
                    if not line.strip():
                        continue  # tolerate blank lines
                    tokens = line.split()
                    paths.append(os.path.join(self.image_list_root, tokens[0]))
                    labels.append(int(tokens[1]) + task_id * 345)
        return np.array(paths), np.array(labels)

    def download_data(self):
        """Populate train/test arrays from the on-disk image-list files."""
        self.image_list_root = os.path.expanduser(self.data_path)
        self.train_data, self.train_targets = self._load_split("train")
        self.test_data, self.test_targets = self._load_split("test")


class iGanFake(object):
    """CDDB deepfake-detection benchmark: seven GAN tasks, real/fake per task.

    Each task ``t`` contributes two labels: ``2*t`` (real) and ``2*t + 1``
    (fake). Images live under ``<task>/<split>/[<class>/]{0_real,1_fake}/``.
    """

    use_path = True  # samples are file paths loaded lazily by the data loader
    train_trsf = [
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=63 / 255),
    ]
    test_trsf = [
        transforms.Resize(256),
        transforms.CenterCrop(224),
    ]
    common_trsf = [
        transforms.ToTensor(),
        # Standard ImageNet normalization statistics.
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ]

    def __init__(self, args):
        self.args = args
        self.data_path = os.path.expanduser("~/workspace/datasets/DomainIL/CDDB")
        # Seven tasks x (real, fake) = 14 labels.
        self.class_order = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        # Domain specific task list; multiclass flags which tasks keep
        # per-class subfolders between the split dir and 0_real/1_fake.
        self.task_name = ["gaugan", "biggan", "cyclegan", "imle", "deepfake", "crn", "wild"]
        self.multiclass = [0, 0, 1, 0, 0, 0, 0]

    def _collect(self, split):
        """Gather (path, label) pairs for one split ('train' or 'val')."""
        samples = []
        # Avoid shadowing the builtin `id` (the original loop variable name).
        for task_id, name in enumerate(self.task_name):
            root = os.path.join(self.data_path, name, split)
            sub_classes = os.listdir(root) if self.multiclass[task_id] else [""]
            for cls in sub_classes:
                # Real images first, then fakes, matching the original order.
                for folder_name, offset in (("0_real", 0), ("1_fake", 1)):
                    folder = os.path.join(root, cls, folder_name)
                    for imgname in os.listdir(folder):
                        samples.append(
                            (os.path.join(folder, imgname), offset + 2 * task_id)
                        )
        return samples

    def download_data(self):
        """Populate train/test arrays by scanning the CDDB directory tree."""
        # The evaluation split is stored under "val" on disk.
        train_dataset = self._collect("train")
        test_dataset = self._collect("val")

        self.train_data, self.train_targets = split_images_labels(train_dataset)
        self.test_data, self.test_targets = split_images_labels(test_dataset)
