import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as D

import torchvision
from torchvision import transforms as T

import cv2
import os
import numpy as np
import albumentations as A
import matplotlib.pyplot as plt
from tqdm import tqdm

class HubDataset(D.Dataset):
    """Tile dataset for HuBMAP-style kidney segmentation.

    Expects ``root_dir`` to contain two subdirectories:
      - ``imgs/``  : image arrays saved with ``np.save`` (``<name>.npy``)
      - ``masks/`` : matching masks named ``<name>_mask.npy``

    ``__getitem__`` returns ``(image, mask)`` where the image is a normalized
    CHW float tensor (ImageNet statistics) and the mask is the numpy array
    with a leading channel axis prepended.
    """

    def __init__(self, root_dir, transform=None,
                 valid_mode = True,
                 imgsize=(512, 512), window=1024, overlap=256, threshold = 200):
        # NOTE(review): `window` and `threshold` are stored but never read by
        # this class as written — kept only for interface compatibility.
        self.path = root_dir
        self.overlap = overlap
        self.window = window
        self.transform = transform
        self.threshold = threshold
        self.imgsize = imgsize
        # True -> deterministic output (center crop, no augmentation).
        self.valid_mode = valid_mode

        self.build_Transform()
        self.build_slices()
        self.len = len(self.images)

    def build_Transform(self):
        """Create the tensor-conversion, resize, and augmentation pipelines."""
        # HWC uint8 -> CHW float tensor, normalized with ImageNet statistics.
        self.as_tensor = T.Compose([
            T.ToTensor(),
            T.Normalize([0.485, 0.456, 0.406],
                        [0.229, 0.224, 0.225]),
        ])
        self.resizefunc = A.Compose([
            A.Resize(self.imgsize[0], self.imgsize[1])
        ])
        if self.transform is None:
            # Default training augmentation: flips, mild photometric jitter,
            # and geometric distortions (albumentations applies the geometric
            # ops jointly to image and mask).
            self.transform = A.Compose([
                A.HorizontalFlip(p=0.5),
                A.VerticalFlip(p=0.5),
                A.OneOf([
                    A.HueSaturationValue(12, 12, 12, p=0.5),
                    A.CLAHE(clip_limit=1, p=0.5),
                    A.RandomBrightnessContrast(brightness_limit=0.05,
                                               contrast_limit=0.05, p=0.5),
                ], p=0.5),
                A.OneOf([
                    A.ElasticTransform(alpha=120, sigma=120 * 0.03,
                                       alpha_affine=120 * 0.03, border_mode=0),
                    A.GridDistortion(border_mode=0),
                    A.OpticalDistortion(distort_limit=2, shift_limit=0.5,
                                        border_mode=0),
                ], p=0.3),
                A.ShiftScaleRotate(scale_limit=0.06, p=0.5, border_mode=0),
            ])

    def build_slices(self):
        """Collect parallel lists of image and mask file paths.

        Mask paths are derived from image names: ``x.npy`` -> ``x_mask.npy``.
        The listing is sorted so the index -> file mapping is reproducible
        across runs (``os.listdir`` order is filesystem-dependent, which
        otherwise makes any downstream index-based split non-reproducible).
        """
        self.images = []
        self.masks = []
        for fn in sorted(os.listdir("{}/imgs".format(self.path))):
            self.images.append("{}/imgs/{}".format(self.path, fn))
            # Split only on the LAST dot: the original `fn.split(".")` fed
            # every fragment to str.format, silently dropping everything after
            # the second fragment for names containing extra dots.
            stem, ext = os.path.splitext(fn)
            self.masks.append("{}/masks/{}_mask{}".format(self.path, stem, ext))

    def set_transform_flg(self, flg: bool):
        """Toggle validation mode (True disables augmentation/randomness)."""
        self.valid_mode = flg

    def __getitem__(self, index):
        """Load, crop, resize, and (in training mode) augment one sample.

        Returns:
            tuple: ``(image, mask)`` — normalized CHW float tensor and the
            mask numpy array with a leading channel axis.
        """
        image = np.load(self.images[index])
        mask = np.load(self.masks[index])

        # Oversized tiles get a crop that trims `overlap` pixels in total per
        # axis: a random offset when training, a centered one in valid mode.
        # NOTE(review): the 1050 cutoff looks tied to the tiling window size
        # used upstream — confirm against the tile generator.
        if mask.shape[0] > 1050:
            margin_y = np.random.randint(0, self.overlap)
            margin_x = np.random.randint(0, self.overlap)
            if self.valid_mode:
                margin_y = self.overlap // 2
                margin_x = self.overlap // 2
            # margin < overlap, so (margin - overlap) is a negative end index
            # and the slice is never empty.
            image = image[margin_y:(margin_y - self.overlap),
                          margin_x:(margin_x - self.overlap), :]
            mask = mask[margin_y:(margin_y - self.overlap),
                        margin_x:(margin_x - self.overlap)]

        augments = self.resizefunc(image=image, mask=mask)
        image, mask = augments['image'], augments['mask']

        if not self.valid_mode:
            augments = self.transform(image=image, mask=mask)
            image = augments['image']
            mask = augments['mask']

        return self.as_tensor(image), mask[None]

    def __len__(self):
        """
        Total number of samples in the dataset
        """
        return self.len

if __name__ == "__main__":
    # Smoke test: build the dataset, make a random 80/20 train/valid split,
    # and iterate both loaders once to exercise __getitem__ end to end.
    ds = HubDataset("E:/hubmap", imgsize=(256, 256), valid_mode=False)

    indexs = np.arange(0, len(ds), 1)
    np.random.shuffle(indexs)
    split = int(len(ds) * 0.8)
    train_idx = indexs[:split]
    valid_idx = indexs[split:]
    train_ds = D.Subset(ds, train_idx)
    valid_ds = D.Subset(ds, valid_idx)

    trainDL = D.DataLoader(train_ds, batch_size=16, shuffle=True, num_workers=0)
    valDL = D.DataLoader(valid_ds, batch_size=16, shuffle=True, num_workers=0)

    # Both Subsets share the single `ds` instance, so augmentation must be
    # toggled (via the class's setter, not a raw attribute write) before each
    # pass: False -> augmented training pass, True -> deterministic valid pass.
    ds.set_transform_flg(False)
    for img, mask in tqdm(trainDL):
        pass
    ds.set_transform_flg(True)
    for img, mask in tqdm(valDL):
        pass