from random import shuffle
import torch
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms

from config import parser

args = parser.parse_args()

'''
Classic image augmentations:
1. Scale the image proportionally
2. Crop the image at a random position
3. Flip the image horizontally / vertically at random
4. Rotate the image by a random angle
5. Randomly jitter brightness, contrast and colour


More recent augmentation techniques:
1. Mixup
2. Cutout
3. Cutmix
4. Mosaic

'''


# A hand-written Dataset needs at least this structure (__init__ / __len__ / __getitem__).
class Dataset(Dataset):
    """Map-style image-classification dataset built from annotation lines.

    Each annotation line is expected to look like ``<path>*<label>`` where
    the label is a 1-based class index (200 classes assumed from
    ``onehot_encode`` — TODO confirm).

    NOTE(review): the class name shadows ``torch.utils.data.Dataset``; kept
    for backward compatibility with existing callers (e.g. ``MixupDataset``).

    Args:
        lines: iterable of annotation strings.
        type: ``'train'`` for training mode (labels returned as plain ints),
            anything else for evaluation mode (labels returned one-hot).
    """

    def __init__(self, lines, type):
        super(Dataset, self).__init__()
        self.annotation_lines = lines
        self.type = type
        self.train_batches = len(self.annotation_lines)
        # Both the train and eval branches used an identical pipeline, and it
        # was rebuilt on every __getitem__ call — build it once here instead.
        self.transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
        ])

    def __len__(self):
        return self.train_batches

    def __getitem__(self, index):
        # Wrap out-of-range indices instead of raising (matches original).
        index = index % len(self.annotation_lines)
        img, y = self.collect_image_label(self.annotation_lines[index])
        # Labels in the annotation file are 1-based; shift to 0-based.
        temp_y = int(y) - 1
        if self.type != 'train':
            # Evaluation labels are one-hot encoded while training labels
            # stay plain ints. NOTE(review): asymmetric on purpose? Confirm
            # against the loss functions used by the training/eval loops.
            temp_y = self.onehot_encode(temp_y)
        temp_img = self.transform(img)
        # .half(): downstream apparently runs in fp16 — TODO confirm.
        return temp_img.half(), temp_y

    def collect_image_label(self, line):
        """Parse one ``<path>*<label>`` annotation line and load its image.

        NOTE(review): the data directory is hard-coded and only the basename
        of the annotated path is used — consider making this configurable.
        """
        line = line.split('*')
        image_path = "E:/Datasets/mosaic_blood/" + line[0].split('/')[-1]
        label = line[1]
        image = Image.open(image_path).convert("RGB")
        return image, label

    def rand(self, a=0, b=1):
        """Uniform random float in [a, b)."""
        return np.random.rand() * (b - a) + a

    def img_augment(self, image):
        """Randomly flip *image* horizontally and/or vertically (p=0.5 each).

        Currently unused in __getitem__ (was gated by args.use_aug).
        """
        # Each flip transform is applied unconditionally once its coin flip
        # succeeds, i.e. the effective flip probability is 0.5 * transform's
        # own internal probability.
        if self.rand() < 0.5:
            image = transforms.RandomHorizontalFlip()(image)
        if self.rand() < 0.5:
            image = transforms.RandomVerticalFlip()(image)
        return image

    def onehot_encode(self, label, n_class=200):
        """Return a length-``n_class`` one-hot float tensor for *label*."""
        diag = torch.eye(n_class)
        oh_vector = diag[label].view(n_class)
        return oh_vector


class MixupDataset(Dataset):
    """Wrap a dataset and, with probability ~0.01, return a mixup of two samples.

    Labels are returned one-hot encoded (200 classes) on every path, so the
    wrapped dataset is expected to yield plain integer labels — TODO confirm
    (an eval-mode Dataset already yields one-hot tensors, which would break
    ``onehot_encode`` here).
    """

    def __init__(self, dataset):
        self.dataset = dataset
        # Beta(0.2, 0.2) is the mixing distribution from the mixup paper.
        self.beta_dist = torch.distributions.beta.Beta(0.2, 0.2)

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        if self.rand() < 0.01:
            image_a, label_a = self.get_oneitem(index)
            # Partner sample drawn uniformly at random (may equal index).
            image_b, label_b = self.get_oneitem(np.random.randint(len(self)))

            if label_a == label_b:
                # Same class: mixing is a no-op on the label, so skip it.
                return image_a, self.onehot_encode(label_a)

            mix_rate = self.beta_dist.sample()
            if mix_rate < 0.5:
                # Keep sample A dominant in the blend.
                mix_rate = 1. - mix_rate
            image = mix_rate * image_a + (1. - mix_rate) * image_b
            oh_label = mix_rate * self.onehot_encode(label_a) \
                + (1. - mix_rate) * self.onehot_encode(label_b)
            return image, oh_label

        # Common path: fetch the item ONCE. The original indexed
        # self.dataset[index] twice here, decoding the same image twice.
        image, label = self.get_oneitem(index)
        return image, self.onehot_encode(label)

    def get_oneitem(self, idx):
        """Return (image, label) for *idx* from the wrapped dataset."""
        image = self.dataset[idx][0]
        label = self.dataset[idx][1]
        return image, label

    def onehot_encode(self, label, n_class=200):
        """Return a length-``n_class`` one-hot float tensor for *label*."""
        diag = torch.eye(n_class)
        oh_vector = diag[label].view(n_class)
        return oh_vector

    def rand(self, a=0, b=1):
        """Uniform random float in [a, b)."""
        return np.random.rand() * (b - a) + a


if __name__ == "__main__":
    Dataset()