from collections import defaultdict
import numpy as np
import os
from PIL import Image
import random
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image, ImageOps
import cv2
import matplotlib.pyplot as plt
import collections
import torch.nn.functional as F
from tqdm import tqdm

def crop(img, mask, size):
    """Randomly crop an (image, mask) pair to ``size`` x ``size``.

    If either dimension is smaller than ``size``, the pair is first
    padded on the right/bottom: the image with 0, the mask with 255
    (the ignore label).
    """
    width, height = img.size
    pad_w = max(size - width, 0)
    pad_h = max(size - height, 0)
    img = ImageOps.expand(img, border=(0, 0, pad_w, pad_h), fill=0)
    mask = ImageOps.expand(mask, border=(0, 0, pad_w, pad_h), fill=255)

    # pick a random top-left corner for the crop window
    width, height = img.size
    left = random.randint(0, width - size)
    top = random.randint(0, height - size)
    box = (left, top, left + size, top + size)

    return img.crop(box), mask.crop(box)


def hflip(img, mask):
    """With probability 0.5, flip both image and mask left-to-right."""
    if random.random() >= 0.5:
        return img, mask
    return (img.transpose(Image.FLIP_LEFT_RIGHT),
            mask.transpose(Image.FLIP_LEFT_RIGHT))


def normalize(img, mask):
    """
    :param img: PIL image
    :param mask: PIL image, corresponding mask
    :return: normalized torch tensor of image and mask
    """
    # ToTensor scales to [0, 1]; Normalize applies the ImageNet mean/std
    to_tensor = transforms.ToTensor()
    standardize = transforms.Normalize([0.485, 0.456, 0.406],
                                       [0.229, 0.224, 0.225])
    img = standardize(to_tensor(img))
    # masks stay as integer class indices
    mask = torch.from_numpy(np.array(mask)).long()
    return img, mask


def random_resize(img, mask, base_size=400, ratio_range=(0.5, 2.0)):
    """Randomly rescale an (image, mask) pair, preserving aspect ratio.

    The longer side is drawn uniformly from
    ``[base_size * ratio_range[0], base_size * ratio_range[1]]``.

    Bug fix: the previous default ``(0, 5, 2.0)`` was a typo for
    ``(0.5, 2.0)`` — with a lower bound of 0 the sampled long side could
    be 0 (a zero-sized resize) and the upper bound was 5x base_size
    instead of the conventional 2x.

    :param img: PIL image
    :param mask: PIL image, corresponding mask
    :param base_size: reference size of the longer side
    :param ratio_range: (min, max) scale factors applied to base_size
    :return: resized (img, mask) pair
    """
    w, h = img.size
    long_side = random.randint(int(base_size * ratio_range[0]), int(base_size * ratio_range[1]))

    # scale the shorter side proportionally, rounding to nearest pixel
    if h > w:
        oh = long_side
        ow = int(1.0 * w * long_side / h + 0.5)
    else:
        ow = long_side
        oh = int(1.0 * h * long_side / w + 0.5)

    img = img.resize((ow, oh), Image.BILINEAR)
    # NEAREST keeps label values intact (no interpolation between class ids)
    mask = mask.resize((ow, oh), Image.NEAREST)
    return img, mask


def cutout(img, mask, p=0.5, size_min=0.02, size_max=0.4, ratio_1=0.3,
           ratio_2=1/0.3, value_min=0, value_max=255, pixel_level=True):
    """With probability ``p``, erase a random rectangle from the image.

    The erased image region is filled with random values (per-pixel when
    ``pixel_level`` is True, a single value otherwise) and the matching
    mask region is set to 255, the ignore label.
    """
    if random.random() >= p:
        return img, mask

    img_arr = np.array(img)
    mask_arr = np.array(mask)
    h, w, c = img_arr.shape

    # rejection-sample an erase box that lies fully inside the image
    while True:
        area = np.random.uniform(size_min, size_max) * h * w
        aspect = np.random.uniform(ratio_1, ratio_2)
        box_w = int(np.sqrt(area / aspect))
        box_h = int(np.sqrt(area * aspect))
        left = np.random.randint(0, w)
        top = np.random.randint(0, h)

        if left + box_w <= w and top + box_h <= h:
            break

    if pixel_level:
        fill = np.random.uniform(value_min, value_max, (box_h, box_w, c))
    else:
        fill = np.random.uniform(value_min, value_max)

    img_arr[top:top + box_h, left:left + box_w] = fill
    mask_arr[top:top + box_h, left:left + box_w] = 255

    return (Image.fromarray(img_arr.astype(np.uint8)),
            Image.fromarray(mask_arr.astype(np.uint8)))


def resize(image, label, size):
    """Resize image and label to ``size`` x ``size``.

    Bilinear interpolation for the image; nearest-neighbour for the
    label so class indices are never blended.
    """
    target = (size, size)
    return image.resize(target, Image.BILINEAR), label.resize(target, Image.NEAREST)


class Floating(Dataset):
    """
    FewShot generates support-query pairs in an episodic manner,
    intended for meta-training and meta-testing paradigm.

    Each __getitem__ samples one query image and ``shot`` support images
    that all contain the same randomly chosen target class; masks are
    binarized to {0: background, 1: target, 255: ignore}.
    """

    def __init__(self, root, size, mode, fold, shot, episode, crop_val=False):
        """
        :param root: dataset root with 'train'/'test' subfolders, each
            containing 'image', 'mask' and 'mask_jpg' directories
        :param size: crop (train) / resize (val) size in pixels
        :param mode: 'train' uses base classes; any other value uses the
            novel classes and the 'test' subfolder
        :param fold: cross-validation fold selecting the novel classes
        :param shot: number of support images per episode
        :param episode: number of episodes, reported as the dataset length
        :param crop_val: if True, resize val samples to ``size``
        """
        super(Floating, self).__init__()
        self.size = size
        self.mode = mode
        self.fold = fold
        self.shot = shot
        self.episode = episode
        self.crop_val = crop_val

        n_class = 12

        # the class range is split into 3 folds of ``interval`` classes each
        interval = n_class // 3
        if self.mode == 'train':
            # base classes = all classes - novel classes
            self.classes = set(range(1, n_class + 1)) - set(range(interval * fold + 1, interval * (fold + 1) + 1))
            self.img_path = os.path.join(root, "train", 'image')
            self.mask_path = os.path.join(root, "train", 'mask')
            self.id_path=os.path.join(root, "train", 'mask_jpg')


        else:
            # novel classes
            self.classes = set(range(interval * fold + 1, interval * (fold + 1) + 1))
            self.img_path = os.path.join(root, "test", 'image')
            self.mask_path = os.path.join(root, "test", 'mask')
            self.id_path=os.path.join(root, "test", 'mask_jpg')

        # if mode=train, fold=0: classes={4,5,6,7,8,9,10,11,12}
        # if mode=train, fold=1: classes={1,2,3,7,8,9,10,11,12}

        # if mode=test, fold=0: classes={1,2,3}
        # if mode=test, fold=1: classes={4,5,6}

        # the image ids must be stored in 'train.txt' and 'val.txt'
        # with open(os.path.join(self.id_path, '%s.txt' % mode), 'r') as f:  # load all image names
        #     self.ids = f.read().splitlines()

        # image ids are taken from the file names under ``mask_jpg``
        self.ids = os.listdir(self.id_path)

        self.ids = [id.split(".")[0] for id in self.ids]

        self._filter_ids() # drop ids without large-enough valid-class objects

        self.cls_to_ids = self._map_cls_to_cls()  # maps each class id to the image ids containing it
        # e.g. 1:['USV_00434', 'USV_01711', 'USV_04633', 'USV_05610', 'USV_03442']
        
        # e.g. 2:['USV_03683', 'USV_03724', 'USV_00038', 'USV_04743', 'USV_00933']

    def __getitem__(self, item):
        """Sample one episode: (support imgs, support masks, query img,
        query mask, class id, support ids, query id). ``item`` is ignored;
        sampling is random each call."""
        # the sampling strategy is based on the description in OSLSM paper

        # query id, image, mask
        id_q = random.choice(self.ids)  # randomly pick one image as the query
        img_q = Image.open(os.path.join(self.img_path, id_q + ".jpg")).convert('RGB')  # load the query image
        mask_q = Image.fromarray(np.array(Image.open(os.path.join(self.mask_path, id_q + ".png")).convert("P")))  # load the query mask
        # target class
        cls = random.choice(sorted(set(np.unique(mask_q)) & self.classes))  # pick a valid class present in the query

        # support ids, images and masks
        id_s_list, img_s_list, mask_s_list = [], [], []
        while True:
            # NOTE(review): rejected (too-small) ids stay in the candidate pool,
            # so this loop can redraw them indefinitely if a class has few
            # large-object images — consider excluding rejected ids
            id_s = random.choice(sorted(set(self.cls_to_ids[cls]) - {id_q} - set(id_s_list)))  # pick a support image distinct from the query
            img_s = Image.open(os.path.join(self.img_path, id_s + ".jpg")).convert('RGB')
            # NOTE(review): support mask is opened without convert("P"), unlike the
            # query and _filter_ids paths — confirm label values still match
            mask_s = Image.fromarray(np.array(Image.open(os.path.join(self.mask_path, id_s + ".png"))))

            # small objects in support images are filtered following PFENet
            if np.sum(np.array(mask_s) == cls) < 2 * 32 * 32:  # target too small, skip this image
                continue

            id_s_list.append(id_s)
            img_s_list.append(img_s)
            mask_s_list.append(mask_s)
            if len(id_s_list) == self.shot:
                break

        if self.mode == 'train':  # train mode: apply data augmentation
            img_q, mask_q = crop(img_q, mask_q, self.size)
            img_q, mask_q = hflip(img_q, mask_q)
            for k in range(self.shot):
                img_s_list[k], mask_s_list[k] = crop(img_s_list[k], mask_s_list[k], self.size)
                img_s_list[k], mask_s_list[k] = hflip(img_s_list[k], mask_s_list[k])

        elif self.mode == "val" and self.crop_val:
            img_q, mask_q = resize(img_q, mask_q, self.size)
            for k in range(self.shot):
                img_s_list[k], mask_s_list[k] = resize(img_s_list[k], mask_s_list[k], self.size)

        img_q, mask_q = normalize(img_q, mask_q)
        for k in range(self.shot):
            img_s_list[k], mask_s_list[k] = normalize(img_s_list[k], mask_s_list[k])

        # binarize: map irrelevant classes to background (0), the target
        # class to 1, and leave the ignore label 255 untouched
        mask_q[(mask_q != cls) & (mask_q != 255)] = 0
        mask_q[mask_q == cls] = 1
        for k in range(self.shot):
            mask_s_list[k][(mask_s_list[k] != cls) & (mask_s_list[k] != 255)] = 0
            mask_s_list[k][mask_s_list[k] == cls] = 1

        # support images: list of length ``shot``, each tensor [3, size, size]
        # support masks:  list of length ``shot``, each tensor [size, size]
        # query image:    tensor [3, size, size]
        # query mask:     tensor [size, size] with values in {0, 1} (plus 255 where padded/ignored)
        # target class id, e.g. 15
        # support ids, e.g. ['2008_000578', '2007_001704', '2007_002227', '2008_001896', '2008_000309']
        # query id, e.g. '2011_001621'
        return img_s_list, mask_s_list, img_q, mask_q, cls, id_s_list, id_q

    def __len__(self):
        # length is the configured number of episodes, not the image count
        return self.episode

    # remove images containing none of the valid classes,
    # and images whose valid objects are all small (following PFENet)
    def _filter_ids(self):
        # iterate backwards so deletions don't shift the remaining indices
        for i in tqdm(range(len(self.ids) - 1, -1, -1)):
            mask = Image.fromarray(np.array(Image.open(os.path.join(self.mask_path, self.ids[i] + '.png')).convert("P")))
            classes = set(np.unique(mask)) & self.classes
            if not classes:
                del self.ids[i]
                continue

            # remove images whose valid objects are all small (according to PFENet)
            exist_large_objects = False
            for cls in classes:
                if np.sum(np.array(mask) == cls) >= 2 * 32 * 32:
                    exist_large_objects = True
                    break
            if not exist_large_objects:
                del self.ids[i]

    # map each valid class to a list of image ids
    def _map_cls_to_cls(self):
        cls_to_ids = defaultdict(list)
        for id_ in tqdm(self.ids):
            # NOTE(review): mask opened without convert("P") here, unlike
            # _filter_ids — verify both paths see the same label values
            mask = np.array(Image.open(os.path.join(self.mask_path, id_ + ".png")))
            valid_classes = set(np.unique(mask)) & self.classes
            for cls in valid_classes:
                cls_to_ids[cls].append(id_)
        return cls_to_ids


def get_floating_dataset(root, fold, shot, snapshot, batch_size, crop_val=False):
    """Build episodic train/test dataloaders over the Floating dataset.

    :param root: dataset root directory
    :param fold: cross-validation fold (selects base vs. novel classes)
    :param shot: number of support images per episode
    :param snapshot: number of training episodes per epoch
    :param batch_size: training batch size (the test loader always uses 1)
    :param crop_val: forwarded to the validation dataset
    :return: (trainloader, testloader)
    """
    train_data = Floating(root=root, size=473, mode='train', fold=fold, shot=shot, episode=snapshot)
    test_data = Floating(root=root, size=473, mode='val', fold=fold, shot=shot, episode=300, crop_val=crop_val)

    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True,
                              pin_memory=True, num_workers=4, drop_last=True)
    test_loader = DataLoader(test_data, batch_size=1, shuffle=False,
                             pin_memory=True, num_workers=4, drop_last=False)

    return train_loader, test_loader


if __name__ == "__main__":
    # Smoke test: build the loaders and display the first query image.
    trainloader, testloader = get_floating_dataset(
        "/home/ubuntu/data/few-shot-dataset-2", fold=0, shot=5, snapshot=1000, batch_size=2)

    img_s_list, mask_s_list, img_q, mask_q, cls, _, id_q = next(iter(trainloader))

    plt.figure()
    plt.imshow(img_q[0].permute(1, 2, 0).numpy())
    plt.show()