import random

from torchvision import transforms
from torchvision.transforms import RandomCrop,functional as F
from torchvision.transforms import RandomCrop,RandomRotation,RandomHorizontalFlip
from torch.utils.data import Dataset, DataLoader
import os
from PIL import Image
import numpy as np
import cv2
import albumentations as A
import cfg


class myDataSet(Dataset):
    """Dataset of paired (image, mask) samples stored together as .npy files.

    Every file under ``root_path`` is loaded with ``np.load``; element 0 is
    treated as a BGR image and element 1 as the raw mask image (assumption
    inferred from the indexing in ``__getitem__`` -- confirm against the
    code that writes these files).
    """

    def __init__(self, root_path, is_train=True):
        """
        Args:
            root_path: directory containing one .npy file per sample.
            is_train: if True, photometric augmentation is applied to the
                image in ``__getitem__``.
        """
        self.is_train = is_train
        self.file_name_list = os.listdir(root_path)
        self.root_path = root_path
        # self.RandomCropparam = RandomCrop(cfg.img_size, pad_if_needed=True)
        self.RandomHorizontalFlip = RandomHorizontalFlip()
        # Build the augmentation pipeline once; rebuilding it on every
        # img_enhance() call (as before) was wasted per-sample work.
        self._aug = A.Compose([
            A.RandomBrightnessContrast(p=0.5),
            # A.ISONoise(color_shift=(0.01, 0.05), intensity=(0.1, 0.5), always_apply=False, p=0.5),
            # A.GaussianBlur(p=0.2),
            # A.RandomRain(p=0.5)
        ])

    def __len__(self):
        return len(self.file_name_list)

    def letterbox(self, img, new_shape=(640, 640), color=(0, 0, 0), auto=False, scaleFill=False, scaleup=True, stride=32):
        """Resize and pad ``img`` to ``new_shape`` while preserving aspect
        ratio (YOLO-style letterboxing).

        Args:
            img: HxWxC numpy image.
            new_shape: target (height, width), or a single int for a square.
            color: constant border fill value.
            auto: if True, pad only up to the nearest multiple of ``stride``.
            scaleFill: if True, stretch to ``new_shape`` with no padding.
            scaleup: currently unused -- the clamp that would honor it is
                commented out below.
            stride: stride used when ``auto`` is True.

        Returns:
            Tuple of (padded image, (width_ratio, height_ratio), (dw, dh)),
            where dw/dh are the per-side padding amounts.
        """
        # Resize and pad image while meeting stride-multiple constraints
        shape = img.shape[:2]  # current shape [height, width]
        if isinstance(new_shape, int):
            new_shape = (new_shape, new_shape)

        # Scale ratio (new / old)
        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
        # if not scaleup:  # only scale down, do not scale up (for better test mAP)
        #     r = min(r, 1.0)

        # Compute padding
        ratio = r, r  # width, height ratios
        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
        dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
        if auto:  # minimum rectangle
            dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
        elif scaleFill:  # stretch
            dw, dh = 0.0, 0.0
            new_unpad = (new_shape[1], new_shape[0])
            ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

        dw /= 2  # divide padding into 2 sides
        dh /= 2

        if shape[::-1] != new_unpad:  # resize
            img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
        # The +/-0.1 before rounding splits an odd padding amount between
        # the two sides instead of dropping a pixel.
        top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
        left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
        img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
        return img, ratio, (dw, dh)

    def __getitem__(self, index):
        """Load one sample.

        Samples whose image is not 3-channel are skipped by resampling a
        fresh random index until a valid sample is found.

        Returns:
            Tuple of (normalized image tensor, binary mask tensor,
            raw augmented image array, index actually used).
        """
        while True:
            mix_data = np.load(os.path.join(self.root_path, self.file_name_list[index]))
            img = mix_data[0]
            mask = mix_data[1]

            # mask=255-mask
            # mask=np.where(mask>0,255,mask)
            if img.shape[-1] != 3:
                # Bad sample: resample. randrange excludes len(), fixing
                # the off-by-one of randint(0, len) which could produce an
                # out-of-range index and raise IndexError on the next load.
                index = random.randrange(self.__len__())
                print("---------------------------")
                continue

            assert len(img.shape) == 3, f"{self.file_name_list[index]}"
            assert len(mask.shape) == 3, f"{self.file_name_list[index]}"

            # BGR -> RGB -> grayscale, invert, then Otsu-binarize so the
            # mask ends up as {0, 1} values replicated to 3 channels.
            mask = Image.fromarray(mask[..., ::-1]).convert("L")
            mask = 255 - np.array(mask)
            _, binary = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
            mask = np.repeat(binary[..., None], 3, axis=-1)
            mask = mask / 255
            break

        if self.is_train:
            img = self.img_enhance(img)

        binary = transforms.ToTensor()(mask)
        _img = self.img2tensor(img)

        return _img, binary, img, index

    def img2tensor(self, x):
        """Convert an image array to a normalized float tensor.

        3-channel input is normalized with ImageNet statistics; 2-D input
        uses dataset-specific statistics. Any other shape is rejected
        explicitly instead of silently returning None (the old behavior).
        """
        shape = np.array(x).shape
        if len(shape) == 3:
            return transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            ])(x)
        elif len(shape) == 2:
            return transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(mean=[349.], std=[677.])
            ])(x)
        raise ValueError(f"img2tensor: unsupported image shape {shape}")

    def img_enhance(self, img):
        """Apply the photometric augmentation pipeline built in __init__
        and return the augmented image as a numpy array."""
        # Albumentations returns its results in a dict keyed by input name.
        transformed = self._aug(image=img)
        return np.array(transformed["image"])


if __name__ == '__main__':
    # Smoke test: build the dataset, iterate it in small batches, and
    # print the tensor shapes coming out of the loader.
    # dataDir = cfg.train_path
    data_dir = 'data/train0_crop'

    dataset = myDataSet(data_dir)
    loader = DataLoader(dataset, 3, shuffle=True)

    # The raw-image and index elements of each batch are unused here.
    for images, masks, _, _ in loader:
        print(images.shape)
        print(masks.shape)
