#!/usr/bin/env python
# -*- coding: utf-8 -*-
#  @Time    : 2021-02-08 23:19
#  @Author  : lifan
#  @File    : dataset.py
#  @Software: PyCharm
# @Brief   :

import torch

from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms as T
from albumentations import (Blur,Flip,ShiftScaleRotate,GridDistortion,ElasticTransform,HorizontalFlip,CenterCrop,
                            HueSaturationValue,Transpose,RandomBrightnessContrast,CLAHE,RandomCrop,Cutout,CoarseDropout,
                            CoarseDropout,Normalize,ToFloat,OneOf,Compose,Resize,RandomRain,RandomFog,Lambda
                            ,ChannelDropout,GaussNoise,ISONoise,VerticalFlip,RandomGamma,RandomRotate90, RandomContrast, RandomBrightness,
                            OpticalDistortion)
import albumentations as A
import cv2
from albumentations.pytorch import ToTensor
from albumentations.pytorch import ToTensorV2
import numpy as np
# config
# IMAGE_SIZE = (512, 512)


class MyDataset(Dataset):
    """Segmentation dataset driven by a text listing of path pairs.

    Each line of the split file is "image_path,label_path".  Images are
    loaded as RGB arrays, labels as single-channel grayscale masks, and
    both go through the albumentations pipeline built in
    :meth:`get_transforms`.  ``__getitem__`` returns
    ``(image_tensor, mask_tensor, label_path)``.
    """

    # One listing file per split; each line is "image_path,label_path".
    _SPLIT_FILES = {
        "train": "/home/fanli/ci2p_dl_code/YingyingHan_UNetSeg_Experiment/natural_images/dataloader/txtpath/natural_images_train_dataset.txt",
        "val": "/home/fanli/ci2p_dl_code/YingyingHan_UNetSeg_Experiment/natural_images/dataloader/txtpath/natural_images_val_dataset.txt",
    }

    def __init__(self, fold: int, mode: str, images_size: int):
        """
        Args:
            fold: cross-validation fold index in 1..10.  Currently unused by
                the hard-coded split files above — kept for interface
                stability with the earlier fold-based listings.
            mode: "train" or "val"; selects the split file and whether
                training-time augmentation is applied.
            images_size: side length (pixels) images and masks are resized to.
        """
        super(MyDataset, self).__init__()
        # Validate arguments first, before any transform building or file I/O.
        assert fold in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], \
            "fold should be an integer number between 1 and 10, but got {}".format(fold)
        assert mode in ["train", "val"], \
            "mode should be 'train' or 'val', but got {}".format(mode)

        self.fold = fold
        self.mode = mode
        self.image_size = images_size
        self.transforms = self.get_transforms(mode, images_size)

        # Read "image,label" pairs.  A single loop keyed on the mode replaces
        # the previously duplicated train/val branches.
        self.images, self.labels = [], []
        with open(self._SPLIT_FILES[mode], 'r') as f:
            for line in f:
                image, label = line.strip().split(',')
                self.images.append(image)
                self.labels.append(label)

    def get_transforms(self, mode, images_size):
        """Build the albumentations pipeline for the given split.

        Training adds a mild shift/scale/rotate augmentation; both splits
        then resize, normalize with ImageNet statistics, and convert to
        tensors.
        """
        list_transforms = []
        if mode == "train":
            list_transforms.append(
                ShiftScaleRotate(
                    shift_limit=0,       # no translation
                    scale_limit=0.1,     # up to +/-10% scaling
                    rotate_limit=10,     # up to +/-10 degrees rotation
                    p=0.5,
                    border_mode=cv2.BORDER_CONSTANT,
                )
            )
        list_transforms.extend(
            [
                Resize(images_size, images_size),
                # ImageNet channel statistics; inputs are 0-255 uint8.
                Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225],
                          max_pixel_value=255.0, p=1),
                # NOTE(review): albumentations' ToTensor is deprecated in
                # favour of ToTensorV2 (already imported); kept as-is to
                # preserve the current tensor layout/behaviour.
                ToTensor(),
            ]
        )
        return Compose(list_transforms, p=1.)

    def _load_img(self, path, transforms=None):
        """Load *path* with PIL, resize to the dataset size, and apply any
        extra torchvision transforms.

        Currently unused by ``__getitem__`` (the cv2/albumentations path is
        used instead); kept for compatibility.
        """
        # Fix: the previous `transforms=[]` mutable default was shared
        # across calls; build a fresh list per call instead.
        extra = list(transforms) if transforms else []
        image = Image.open(path)
        return T.Compose(
            [T.Resize((self.image_size, self.image_size))] + extra
        )(image)

    def __getitem__(self, idx):
        """Return ``(image_tensor, mask_tensor, label_path)`` for *idx*.

        The train and val branches were identical, so a single body handles
        both (mode validity is enforced in ``__init__``).
        """
        # OpenCV loads BGR; convert to RGB for the model.
        image = cv2.imread(self.images[idx])
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # Read the label as 3-channel BGR, then collapse to a grayscale mask.
        label = cv2.imread(self.labels[idx])
        label = cv2.cvtColor(label, cv2.COLOR_BGR2GRAY)

        augmented = self.transforms(image=image, mask=label)
        return augmented['image'], augmented['mask'], self.labels[idx]

    def __len__(self):
        """Return the total number of samples in this split."""
        return len(self.images)

# train_dataset = MyDataset(fold=1, mode="train", images_size=256)
# train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=1, shuffle=True, num_workers=0)
#
# img, lable = next(iter(train_loader))
# print(img.shape, lable.shape)  # torch.Size([1, 3, 512, 512]) torch.Size([1, 5, 512, 512])