import os
import cv2
import numpy as np
from PIL import Image,ImageFilter
import cv2
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import random
from scipy.spatial.distance import cdist
from tools.plotting import plot_matches


def random_choice(array, size):
    """Deterministically sample ``size`` rows from ``array``.

    A fresh ``RandomState`` with a fixed seed is built on every call, so the
    chosen index pattern is reproducible for a given array length. Sampling is
    without replacement when the array is larger than ``size``, with
    replacement otherwise (so the result always has exactly ``size`` rows).
    """
    rng = np.random.RandomState(1234)
    total = len(array)
    # Replacement is only needed when there are not enough rows to draw from.
    picked = rng.choice(total, size, replace=total <= size)
    return array[picked]

class GaussianBlur:
    """Blur a PIL image with a sigma drawn uniformly from a range.

    Gaussian blur augmentation from SimCLR (https://arxiv.org/abs/2002.05709),
    adapted from MoCo:
    https://github.com/facebookresearch/moco/blob/master/moco/loader.py
    Note that this implementation does not seem to be exactly the same as
    described in SimCLR.
    """

    def __init__(self, sigma=[0.1, 2.0]):
        # [min, max] range for the blur radius; list default kept for
        # interface compatibility (never mutated).
        self.sigma = sigma

    def __call__(self, x):
        low, high = self.sigma[0], self.sigma[1]
        radius = random.uniform(low, high)
        return x.filter(ImageFilter.GaussianBlur(radius=radius))


class ADE20K(Dataset):
    """
    ADE20K dataset `http://sceneparsing.csail.mit.edu/`, wrapped for
    self-supervised keypoint matching: each sample pairs an image with a
    random perspective warp of itself, plus mutual-nearest-neighbor
    keypoint correspondences between the two.

    Args:
        dataset_root (str, optional): The ADE20K dataset directory. Default: None.
        mode (str, optional): A subset of the entire dataset. It should be one of ('train', 'val'). Default: 'train'.
        num_pts (int, optional): Number of query keypoints kept per image. Default: 1024.
        edge (bool, optional): Whether to compute edge while training. Default: False
    """
    NUM_CLASSES = 150
    IGNORE_INDEX = 255
    IMG_CHANNELS = 3

    def __init__(self, dataset_root=None, mode='train', num_pts=1024, edge=False, **kwargs):
        self.dataset_root = dataset_root
        # Photometric-only augmentation (no geometry), so keypoint
        # correspondences computed on the raw images remain valid.
        self.transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.ColorJitter(brightness=0.5, contrast=0.5),
            transforms.RandomGrayscale(p=0.1),
            transforms.RandomApply([GaussianBlur([0.1, 2.0])], p=0.5),
            transforms.ToTensor()
        ])
        mode = mode.lower()
        self.mode = mode
        self.file_list = list()
        self.num_classes = self.NUM_CLASSES
        self.ignore_index = self.IGNORE_INDEX
        self.edge = edge
        self.num_pts = num_pts
        # Fraction of query points drawn uniformly at random; the remainder
        # come from SIFT detections (see generate_query_kpts 'mixed' mode).
        self.random_percent = 0.5

        if mode not in ['train', 'val']:
            raise ValueError(
                "`mode` should be one of ('train', 'val') in ADE20K dataset, but got {}."
                .format(mode))

        if self.transform is None:
            raise ValueError("`transforms` is necessary, but it is None.")

        if mode == 'train':
            img_dir = os.path.join(self.dataset_root, 'images/training')
            label_dir = os.path.join(self.dataset_root, 'annotations/training')
        elif mode == 'val':
            img_dir = os.path.join(self.dataset_root, 'images/validation')
            label_dir = os.path.join(self.dataset_root,
                                     'annotations/validation')
        img_files = os.listdir(img_dir)
        # Annotation files share the image basename with a .png extension.
        label_files = [i.replace('.jpg', '.png') for i in img_files]
        for i in range(len(img_files)):
            img_path = os.path.join(img_dir, img_files[i])
            label_path = os.path.join(label_dir, label_files[i])
            self.file_list.append([img_path, label_path])

    def generate_query_kpts(self, img, num_pts, h, w, mode='mixed'):
        """Generate candidate query keypoints.

        Args:
            img (np.ndarray): HxWx3 image (only read by the SIFT-based modes).
            num_pts (int): Requested number of points (SIFT may yield a
                different count, so callers subsample afterwards).
            h (int): Image height.
            w (int): Image width.
            mode (str): One of 'random', 'sift', 'mixed'. Default: 'mixed'.

        Returns:
            np.ndarray: (N, 3) array of (x, y, flag) rows; flag is 0 for
            uniformly random points and 1 for SIFT detections.

        Raises:
            Exception: If ``mode`` is not one of the supported values.
        """
        if mode == 'random':
            kp1_x = np.random.rand(num_pts) * (w - 1)
            kp1_y = np.random.rand(num_pts) * (h - 1)
            coord = np.stack((kp1_x, kp1_y, np.zeros(kp1_x.shape))).T

        elif mode == 'sift':
            gray1 = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            sift = cv2.SIFT_create(nfeatures=num_pts)
            kp1 = sift.detect(gray1)
            coord = np.array([[kp.pt[0], kp.pt[1], 1] for kp in kp1])

        elif mode == 'mixed':
            # Uniformly random points for the `random_percent` share.
            kp1_x = np.random.rand(1 * int(self.random_percent * num_pts)) * (w - 1)
            kp1_y = np.random.rand(1 * int(self.random_percent * num_pts)) * (h - 1)
            kp1_rand = np.stack((kp1_x, kp1_y, np.zeros(kp1_x.shape))).T

            # SIFT detections for the remaining share.
            sift = cv2.SIFT_create(nfeatures=int((1 - self.random_percent) * num_pts))
            gray1 = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            kp1_sift = sift.detect(gray1)
            kp1_sift = np.array([[kp.pt[0], kp.pt[1], 1] for kp in kp1_sift])
            # SIFT can find nothing on texture-less images; fall back to
            # the random points alone rather than concatenating with an
            # empty (0,)-shaped array.
            if len(kp1_sift) == 0:
                coord = kp1_rand
            else:
                coord = np.concatenate((kp1_rand, kp1_sift), 0)

        else:
            raise Exception('unknown type of keypoints')

        return coord

    def __getitem__(self, idx):
        """Load one image, build a randomly warped copy, and compute matches."""
        image_path, label_path = self.file_list[idx]
        img_meta = cv2.imread(image_path)
        # BUGFIX: cv2.imread expects an IMREAD_* flag; cv2.COLOR_BGR2GRAY is a
        # cvtColor code and previously selected an unintended read mode.
        img_label = cv2.imread(label_path, cv2.IMREAD_GRAYSCALE)
        # Resize both to a fixed 640x480 resolution.
        img_meta = cv2.resize(img_meta, (640, 480))
        # BUGFIX: nearest-neighbor interpolation so class ids are not blended
        # into invalid intermediate values.
        img_label = cv2.resize(img_label, (640, 480), interpolation=cv2.INTER_NEAREST)
        # Build a random homography by jittering the four image corners.
        h1, w1 = img_meta.shape[:2]
        corners = np.array([[0, 0], [0, h1], [w1, 0], [w1, h1]], dtype=np.float32)
        warp = np.random.randint(-224, 224, size=(4, 2)).astype(np.float32)
        M = cv2.getPerspectiveTransform(corners, corners + warp)
        warped_img = cv2.warpPerspective(src=img_meta, M=M, dsize=(img_meta.shape[1], img_meta.shape[0]))  # return an image type
        # BUGFIX: warp the label map with nearest-neighbor as well.
        warped_label = cv2.warpPerspective(src=img_label, M=M,
                                           dsize=(img_label.shape[1], img_label.shape[0]),
                                           flags=cv2.INTER_NEAREST)
        h2, w2 = warped_img.shape[:2]
        # Over-sample candidates (10x), then subsample a fixed-size set.
        kp1 = self.generate_query_kpts(img_meta, 10 * self.num_pts, h1, w1)[:, :2]
        kp2 = self.generate_query_kpts(warped_img, 10 * self.num_pts, h2, w2)[:, :2]
        kp1 = random_choice(kp1, self.num_pts)
        kp2 = random_choice(kp2, self.num_pts)
        # Project kp1 through the homography and match against kp2.
        kp1_projected = cv2.perspectiveTransform(kp1.reshape((1, -1, 2)), M)[0, :, :]
        dists = cdist(kp1_projected, kp2)

        min1 = np.argmin(dists, axis=0)  # for each kp2: nearest projected kp1
        min2 = np.argmin(dists, axis=1)  # for each kp1: nearest kp2

        min1v = np.min(dists, axis=1)
        min1f = min2[min1v < 3]  # kp2 indices reachable from some kp1 within 3 px

        # Mutual nearest-neighbor check, expressed over kp2 indices.
        xx = np.where(min2[min1] == np.arange(min1.shape[0]))[0]
        matches = np.intersect1d(min1f, xx)

        missing1 = np.setdiff1d(np.arange(kp1.shape[0]), min1[matches])
        missing2 = np.setdiff1d(np.arange(kp2.shape[0]), matches)

        # (2, K) matched index pairs: row 0 = kp1 indices, row 1 = kp2 indices.
        MN = np.concatenate([min1[matches][np.newaxis, :], matches[np.newaxis, :]])
        # NOTE(review): MN2/MN3 pad unmatched points with a dustbin index
        # (len(kp)); they are computed but never returned — confirm whether
        # downstream code is supposed to consume them.
        MN2 = np.concatenate([missing1[np.newaxis, :], (len(kp2)) * np.ones((1, len(missing1)), dtype=np.int64)])
        MN3 = np.concatenate([(len(kp1)) * np.ones((1, len(missing2)), dtype=np.int64), missing2[np.newaxis, :]])

        # Validation returns raw images; training applies photometric
        # augmentation (which also converts to tensors).
        if self.mode == 'val':
            img1 = img_meta
            img2 = warped_img
        else:
            img1 = self.transform(img_meta)
            img2 = self.transform(warped_img)

        img_meta = np.array(img_meta)
        img1_label = np.array(img_label)
        img2_label = np.array(warped_label)
        data = {"img1_meta": img_meta,
                "img2_meta": warped_img,
                "img1": img1,
                "img2": img2,
                "img1_label": img1_label,
                "img2_label": img2_label,
                'coord1': np.array(kp1, np.float32),
                'coord2': np.array(kp2, np.float32),
                'all_matches': MN
                }
        return data

    def __len__(self):
        return len(self.file_list)