import torch
from torch.utils.data import Dataset
from torchvision import transforms

import cv2
from PIL import Image
import numpy as np
import csv
from collections import Counter
from random import shuffle, sample

from path import *
from config import args
from logger import logger


def read_csv(split='train'):
    """
    Load the label CSV for one dataset split.

    Args:
        split: 'train' reads train.csv under DATA_PATH;
               'test' reads validation.csv one directory above DATA_PATH.

    Returns:
        List of rows (each a list of str) with the header row removed.

    Raises:
        ValueError: if split is neither 'train' nor 'test'.
    """
    if split == 'train':
        data_path = DATA_PATH
        csv_name = 'train.csv'
    elif split == 'test':
        data_path = os.path.dirname(DATA_PATH)
        csv_name = 'validation.csv'
    else:
        # Previously an unknown split fell through and crashed later
        # with an unrelated NameError; fail fast with a clear message.
        raise ValueError(f"unknown split: {split!r} (expected 'train' or 'test')")
    csv_path = os.path.join(data_path, csv_name)
    logger.logger.info(f'reading {csv_path}')
    with open(csv_path) as f:
        rows = list(csv.reader(f))
    return rows[1:]  # drop the header row


def split_dataset(labels, train_ratio=None):
    """
    Stratified split of the label list into train and validation subsets.

    Args:
        labels: list of rows where row[1] is the class index (str or int).
        train_ratio: per-class fraction kept for training. Defaults to
            args.train_ratio, now resolved at call time instead of being
            frozen into the signature at import time.

    Returns:
        (labels_train, labels_val). When train_ratio == 1 both values are
        the full label list (validation doubles as the training set).
    """
    if train_ratio is None:
        train_ratio = args.train_ratio
    if train_ratio == 1:
        return labels, labels

    split_by_class = [[] for _ in range(args.class_num)]
    for label in labels:
        split_by_class[int(label[1])].append(label)

    labels_train = []
    labels_val = []
    for class_labels in split_by_class:
        num_train = int(len(class_labels) * train_ratio)
        # random.sample() on a set is deprecated since 3.9 and a TypeError
        # on Python 3.11+: sample from an ordered sequence instead.
        idx_train = set(sample(range(len(class_labels)), num_train))
        labels_train += [class_labels[j] for j in idx_train]
        labels_val += [class_labels[j] for j in range(len(class_labels))
                       if j not in idx_train]

    return labels_train, labels_val


# Module-level side effect: the label CSVs are read from disk at import
# time. labels_train / labels_val come from a stratified split of
# train.csv; labels_test (validation.csv) only exists outside the flyai
# setting, which is why the Dataset class restricts valid splits below.
labels_train, labels_val = split_dataset(read_csv())
if not args.train_flyai:
    labels_test = read_csv(split='test')


class BaldClassificationDataset(Dataset):
    """
    In-memory image classification dataset for the bald/not-bald task.

    On construction, all images for the chosen split are loaded and
    resized into a single array, per-channel mean/std statistics are
    computed (train) or loaded from disk (val), and a split-dependent
    torchvision transform pipeline is built.
    """

    def __init__(self, split='train', data_path=DATA_PATH, arguments=args):
        """
        Args:
            split: 'train', 'val', or (non-flyai only) 'test'.
            data_path: root directory of the image files.
            arguments: config namespace (height/width, class_num, ...).
        """
        # Test images live one directory above DATA_PATH (see read_csv).
        self.path = data_path
        if split == 'test' and not args.train_flyai:
            self.path = os.path.dirname(self.path)

        self.args = arguments
        if self.args.train_flyai:
            assert split in ['train', 'val']
        else:
            assert split in ['train', 'val', 'test']
        self.split = split

        # Pick up the module-level label list for this split
        # (labels_train / labels_val / labels_test). A globals() lookup
        # replaces the previous eval() on an interpolated string.
        self.labels = globals()[f'labels_{split}']
        self.len_dataset = len(self.labels)
        self.label_count = self._count_label(self)
        self.img_list = [os.path.join(self.path, row[0]) for row in self.labels]
        self.img_num = len(self.img_list)

        self.imgs = self.stack_images()  # (num, h, w, 3), BGR, 0~255
        self.mean, self.std = self.get_img_stat()  # BGR, scaled to 0~1
        self.transform = self.get_transform()

    def stack_images(self):
        """
        Read every image in img_list, resize to (args.height, args.width)
        and stack into one float array of shape (num, h, w, 3).
        """
        n_img = len(self.img_list)
        imgs = np.zeros((n_img, self.args.height, self.args.width, 3))
        for index in range(n_img):
            img_path = self.img_list[index]
            # np.float was removed in NumPy 1.24; use np.float64 (same type).
            img = cv2.imread(img_path).astype(np.float64)
            img = cv2.resize(img, (self.args.width, self.args.height), interpolation=cv2.INTER_LINEAR)
            imgs[index] = img
        return imgs

    def get_img_stat(self):
        """
        Return per-channel (mean, std) scaled to 0~1, in BGR order.

        The 'val' split loads the statistics saved by the training split;
        other splits compute them from self.imgs and persist them.
        NOTE(review): the 'test' split also recomputes and overwrites the
        saved statistics instead of reusing the training ones — confirm
        this is intended.
        """
        if self.split == 'val':
            mean, std = np.load(MEAN_PATH), np.load(STD_PATH)
            return mean, std

        mean = self.imgs.reshape((-1, 3)).mean(0) / 255
        std = self.imgs.reshape((-1, 3)).std(0) / 255

        np.save(MEAN_PATH, mean)
        np.save(STD_PATH, std)

        return mean, std

    def get_transform(self):
        """
        Build the torchvision transform pipeline for this split:
        train gets flip + random-resized-crop + color jitter, val keeps
        flip + crop (NOTE(review): augmentation on the val split is
        unusual — confirm it is deliberate), test only normalizes.
        """
        random_resized_crop = transforms.RandomResizedCrop(size=(self.args.height, self.args.width),
                                                           scale=(self.args.t_rrc_scale_m, self.args.t_rrc_scale_M))
        if self.split == 'train':
            transform = transforms.Compose([transforms.RandomHorizontalFlip(),
                                            random_resized_crop,
                                            transforms.ColorJitter(args.t_brightness, args.t_contrast,
                                                                   args.t_saturation, args.t_hue),
                                            transforms.ToTensor(),
                                            transforms.Normalize(mean=self.mean, std=self.std)
                                            ])
        elif self.split == 'val':
            transform = transforms.Compose([transforms.RandomHorizontalFlip(),
                                            random_resized_crop,
                                            transforms.ToTensor(),
                                            transforms.Normalize(mean=self.mean, std=self.std)])
        elif self.split == 'test':
            transform = transforms.Compose([transforms.ToTensor(),
                                            transforms.Normalize(mean=self.mean, std=self.std)])

        return transform

    @staticmethod
    def _split_dataset(self):
        """
        Split train and validation data for the current split.

        NOTE(review): appears unused — __init__ relies on the module-level
        split_dataset() instead. Kept (with its unusual staticmethod +
        explicit self calling convention) for backward compatibility.
        """
        if self.args.train_ratio == 1:
            return self.labels

        split_by_class = [[] for _ in range(self.args.class_num)]
        for label in self.labels:
            split_by_class[int(label[1])].append(label)

        labels_train = []
        labels_val = []

        for i in range(self.args.class_num):
            num_train = int(len(split_by_class[i]) * self.args.train_ratio)
            shuffle(split_by_class[i])
            if self.split == 'train':
                labels_train += split_by_class[i][:num_train]
            else:
                labels_val += split_by_class[i][num_train:]

        if self.split == 'train':
            for _ in range(self.args.shuffle_num):
                shuffle(labels_train)
            return labels_train
        else:
            return labels_val

    @staticmethod
    def _read_csv(self):
        """
        Load label rows from self.csv_path, dropping the header row.

        NOTE(review): appears unused — __init__ relies on the module-level
        read_csv() instead, and no code in this file sets self.csv_path.
        """
        with open(self.csv_path) as f:
            f_csv = csv.reader(f)
            f_csv = [row for row in f_csv][1:]

        return f_csv

    @staticmethod
    def _count_label(self):
        """
        Count the occurrences of each label, log the distribution,
        and return the Counter.
        """
        labels = [_[1] for _ in self.labels]
        label_counter = Counter(labels)

        info = [f'dataset type: {args.prefix}{self.split}{args.suffix}', f'img num: {len(self)}'] + \
               [f'label: {i} num: {label_counter[i]}({label_counter[i] / len(self) * 100:.2f}%)'
                for i in label_counter]
        logger.logger.info('\n'.join(info))

        return label_counter

    def __len__(self):
        """Number of samples in this split."""
        return self.len_dataset

    def __getitem__(self, index):
        """
        Return (transformed image tensor, label, image path).

        The label is an int class index under focal loss, otherwise a
        one-hot (optionally label-smoothed) float tensor.
        """
        img_path = self.img_list[index]
        img = self.imgs[index]
        # NOTE(review): self.imgs is in cv2's BGR channel order;
        # convert('RGB') does not reorder channels here — confirm the
        # model is trained consistently on this layout.
        img = Image.fromarray(img.astype('uint8')).convert('RGB')
        img = self.transform(img)

        label = int(self.labels[index][1])
        if not args.use_focal:
            # One-hot encode, then optionally smooth towards uniform.
            label = torch.zeros(self.args.class_num).scatter_(0, torch.LongTensor([label]), torch.Tensor([1]))
            if self.args.do_label_smoothing:
                label = (1.0 - self.args.label_smoothing) * label + \
                        self.args.label_smoothing / self.args.class_num

        return img, label, img_path


if __name__ == '__main__':
    # Smoke test: building the validation dataset exercises the whole
    # pipeline (CSV labels, image loading, saved stats, transforms) and
    # logs the label distribution via _count_label.
    from torch.utils.data import DataLoader

    dataset = BaldClassificationDataset(split='val')