import warnings
from PIL.Image import DecompressionBombWarning
warnings.simplefilter("ignore", FutureWarning)
import numpy as np
import os
import io
import time
import sys
from PIL import Image
import torchvision

from tqdm import tqdm
import cv2
import torch
from torch.utils.data import *
from albumentations.pytorch import ToTensor, ToTensorV2
from albumentations import (
    Compose, HorizontalFlip, CLAHE, HueSaturationValue, Normalize, RandomBrightnessContrast,
    RandomBrightness, RandomContrast, RandomGamma, OneOf, Resize, ImageCompression, Rotate,
    ToFloat, ShiftScaleRotate, GridDistortion, ElasticTransform, JpegCompression, Cutout, GridDropout,
    RGBShift, RandomBrightness, RandomContrast, Blur, MotionBlur, MedianBlur, GaussNoise, CenterCrop,
    IAAAdditiveGaussianNoise, OpticalDistortion, RandomSizedCrop, VerticalFlip, GaussianBlur, CoarseDropout,
    PadIfNeeded, ToGray, FancyPCA)
from catalyst.data.sampler import BalanceClassSampler
from sklearn.model_selection import train_test_split

warnings.simplefilter("ignore", UserWarning)
warnings.simplefilter("ignore", DecompressionBombWarning)


def get_train_transforms(size=300):
    """Build the training augmentation pipeline.

    Resizes to ``size`` x ``size``, applies random flips, color / dropout
    augmentations, occasional grayscale, affine jitter, then ImageNet
    normalization and conversion to a CHW tensor.
    """
    augmentations = [
        Resize(height=size, width=size),
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.5),
        PadIfNeeded(min_height=size, min_width=size, border_mode=cv2.BORDER_CONSTANT),
        OneOf([RandomBrightnessContrast(), HueSaturationValue()], p=0.5),
        OneOf([CoarseDropout(), GridDropout()], p=0.2),
        ToGray(p=0.2),
        ShiftScaleRotate(shift_limit=0.1, scale_limit=0.2, rotate_limit=10,
                         border_mode=cv2.BORDER_CONSTANT, p=0.5),
        Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensorV2(),
    ]
    return Compose(augmentations)

def get_valid_transforms(size=300):
    """Build the deterministic validation/test pipeline: resize, pad,
    ImageNet normalization, and conversion to a CHW tensor."""
    steps = [
        Resize(height=size, width=size, p=1.0),
        PadIfNeeded(min_height=size, min_width=size, border_mode=cv2.BORDER_CONSTANT),
        Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensorV2(p=1.0),
    ]
    return Compose(steps, p=1.0)

def one_hot(size, target):
    """Encode class index ``target`` as a float32 one-hot vector of length ``size``."""
    encoding = torch.zeros(size, dtype=torch.float32)
    encoding.scatter_(0, torch.as_tensor([target]), 1.0)
    return encoding

def load_image_paths_labels(root_path='/raid/chenby/ACCV/train', data_type='train'):
    """Index the class-per-directory image tree under ``root_path``.

    The sorted sub-directory order defines the integer label of each class.
    All (path, label) pairs are split 80/20 with a fixed seed; the partition
    named by ``data_type`` ('train' or anything else for the hold-out set)
    is returned as ``(paths, labels)``.
    """
    image_paths, image_labels = [], []
    for label, class_name in enumerate(sorted(os.listdir(root_path))):
        class_dir = os.path.join(root_path, class_name)
        for img_name in os.listdir(class_dir):
            image_paths.append(os.path.join(class_dir, img_name))
            image_labels.append(label)

    # Fixed random_state keeps the split reproducible across runs.
    x_train, x_test, y_train, y_test = train_test_split(
        image_paths, image_labels, test_size=0.2, random_state=2020)
    print('train:', len(x_train), 'test:', len(x_test))
    return (x_train, y_train) if data_type == 'train' else (x_test, y_test)

def clean_data(image_paths, image_labels, data_type='train'):
    """Filter out images PIL cannot open in RGB mode and cache the survivors.

    Writes the surviving paths and labels to
    ``./npy/<data_type>_paths.npy`` / ``./npy/<data_type>_labels.npy``.
    Non-RGB images (grayscale, palette, CMYK, ...) are silently dropped;
    unreadable files are counted and reported.

    Args:
        image_paths: list of image file paths.
        image_labels: integer label for each path (same ordering).
        data_type: prefix for the output .npy files ('train' / 'val' ...).
    """
    clean_image_paths = []
    clean_image_labels = []
    count = 0  # number of files that failed to open
    for i, image_path in enumerate(image_paths):
        if i % 1000 == 0:
            print(i, i/len(image_paths))  # coarse progress indicator
        try:
            img = Image.open(image_path)
            if img.mode == 'RGB':
                clean_image_paths.append(image_path)
                clean_image_labels.append(image_labels[i])
        except Exception:  # was a bare except: must not swallow KeyboardInterrupt
            count += 1
            print(i, len(image_paths), count, image_path, 'ERROR')
    print('total:', count)
    # np.save does not create directories; without this the save below fails.
    os.makedirs('./npy', exist_ok=True)
    clean_image_paths = np.array(clean_image_paths)
    np.save('./npy/' + data_type + '_paths.npy', clean_image_paths)
    clean_image_labels = np.array(clean_image_labels)
    np.save('./npy/' + data_type + '_labels.npy', clean_image_labels)

def load_data_from_npy(data_type='train', npy_dir='/data1/cby/py_project/ACCV/datasets/npy'):
    """Load the pre-cleaned image path/label arrays saved by ``clean_data``.

    Args:
        data_type: file prefix ('train' / 'val' ...).
        npy_dir: directory containing ``<data_type>_paths.npy`` and
            ``<data_type>_labels.npy``. Defaults to the original hard-coded
            location, so existing callers are unaffected.

    Returns:
        Tuple of numpy arrays ``(image_paths, image_labels)``.
    """
    image_paths = np.load(os.path.join(npy_dir, data_type + '_paths.npy'))
    image_labels = np.load(os.path.join(npy_dir, data_type + '_labels.npy'))
    print(image_labels.shape, image_paths.shape)
    return image_paths, image_labels

def read_image(image_path):
    """Load an image as an RGB float32 HxWxC numpy array.

    Tries PIL first (converting non-RGB modes to RGB); some files PIL
    rejects can still be decoded by OpenCV, so fall back to cv2 before
    giving up.

    Raises:
        IOError: if neither PIL nor OpenCV can decode the file.  (The
            original silently fell through and returned None / raised an
            UnboundLocalError, which surfaced as a confusing crash later.)
    """
    try:
        image = Image.open(image_path)
        if image.mode != 'RGB':
            image = image.convert("RGB")
        return np.array(image, dtype=np.float32)
    except Exception:
        # Some images fail under PIL but can still be read by cv2.
        image = cv2.imread(image_path, cv2.IMREAD_COLOR)
        if image is None:
            print(image_path)
            raise IOError('failed to decode image: %s' % image_path)
        return cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)

class ACCVDataset(Dataset):
    """Classification dataset over the ACCV image tree.

    The (path, label) index comes either from a fresh directory scan or from
    the pre-cleaned .npy cache.  ``__getitem__`` yields ``(image, label)``
    where the label is optionally one-hot encoded to ``classes_num`` classes.
    """

    def __init__(self, root_path='/raid/chenby/ACCV/train', is_one_hot=False, transforms=None,
                 classes_num=5000, data_type='train', is_load_from_npy=False):
        super().__init__()
        self.classes_num = classes_num
        self.transforms = transforms
        self.is_one_hot = is_one_hot
        if is_load_from_npy:
            self.images, self.labels = load_data_from_npy(data_type=data_type)
        else:
            self.images, self.labels = load_image_paths_labels(root_path, data_type)

    def __getitem__(self, index: int):
        image = read_image(self.images[index])
        if self.transforms:
            image = self.transforms(image=image)['image']

        label = self.labels[index]
        if self.is_one_hot:
            label = one_hot(self.classes_num, label)

        return image, label

    def __len__(self) -> int:
        return len(self.images)

    def get_labels(self):
        return list(self.labels)

class ACCVDatasetSubmission(Dataset):
    """Test-time dataset: yields ``(image, file_name)`` pairs for building a
    submission, over every file found directly under ``root_path``."""

    def __init__(self, root_path='/raid/chenby/ACCV/test', is_one_hot=False, transforms=None,
                 classes_num=5000):
        super().__init__()
        self.classes_num = classes_num
        self.transforms = transforms
        self.is_one_hot = is_one_hot
        # Sorted so batch order is deterministic across runs.
        self.image_names = sorted(os.listdir(root_path))
        self.images = [os.path.join(root_path, name) for name in self.image_names]

    def __getitem__(self, index: int):
        image = read_image(self.images[index])
        if self.transforms:
            image = self.transforms(image=image)['image']

        return image, self.image_names[index]

    def __len__(self) -> int:
        return len(self.images)


if __name__ == '__main__':
    # Smoke-test the training pipeline: build the dataset from the cached
    # .npy index, wrap it in a class-balanced loader, and time a few batches.
    start = time.time()
    dataset = ACCVDataset(transforms=get_valid_transforms(size=224), is_one_hot=False,
                          data_type='train', is_load_from_npy=True)
    print('length:', len(dataset))

    loader = DataLoader(dataset, batch_size=128, shuffle=False, num_workers=4,
                        sampler=BalanceClassSampler(labels=dataset.get_labels(), mode="upsampling"))
    for step, (img, label) in enumerate(loader):
        print(step, '/', len(loader), img.shape, label.shape)
        if step == 20:
            break
    end = time.time()
    print('end iterate')
    print('DataLoader total time: %fs' % (end - start))