import numpy as np
import torch
import h5py
from torch.utils.data import Dataset
from torchvision.transforms import Normalize
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import config as cfg
from utils.imutils import crop, flip_img, flip_pose, flip_kp, transform, rot_aa
import cv2
import random
from scipy.spatial.transform import Rotation as R
from skimage import io
from skimage.transform import resize
from torchvision import transforms


class BaseDataset(Dataset):
    """Base Dataset Class - Handles data loading and augmentation.

    Able to handle heterogeneous datasets (different annotations available
    for different datasets). You need to update the path to each dataset
    in utils/config.py.
    """

    def __init__(self, options, dataset, is_train=True, is_use_image=None, is_aug=False,
                 remove_randomness=True, is_visual=False):
        """
        Args:
            options: hyper-parameter namespace; must provide ``img_res``,
                ``noise_factor``, ``rot_factor`` and ``scale_factor``.
            dataset: key into ``cfg.DATASET_FILES`` / ``cfg.VISUAL_DATASET_FILES``.
            is_train: selects the train/test annotation file and enables
                random augmentation parameters in :meth:`augm_params`.
            is_use_image: when truthy, the RGB image is loaded and processed
                in :meth:`__getitem__`.
            is_aug: when True, the sampled augmentation is also applied to
                the GT pose / 3D joints / 2D keypoints.
            remove_randomness: seed ``random``/``np.random`` per sample so
                epochs are reproducible (global RNG state is restored after
                each ``__getitem__``).
            is_visual: load the visualization annotation file instead of the
                train/test split.
        """
        super(BaseDataset, self).__init__()
        self.dataset = dataset
        self.is_train = is_train
        self.options = options
        self.remove_randomness = remove_randomness
        self.normalize_img = Normalize(
            mean=cfg.IMG_NORM_MEAN, std=cfg.IMG_NORM_STD)
        if is_visual:
            self.data = np.load(
                cfg.VISUAL_DATASET_FILES[dataset], allow_pickle=True)
        else:
            self.data = np.load(
                cfg.DATASET_FILES[is_train][dataset], allow_pickle=True)
        self.imgname = self.data['imgname']
        self.use_image = is_use_image
        self.aug = is_aug
        # epoch is expected to be bumped externally (used for per-epoch seeding)
        self.epoch = -1
        self.is_visual = is_visual

        # Get paths to gt masks, if available
        try:
            self.maskname = self.data['maskname']
        except KeyError:
            pass
        try:
            self.partname = self.data['partname']
        except KeyError:
            pass

        # Bounding boxes are assumed to be in the center and scale format
        try:
            self.scale = self.data['scale']
            self.center = self.data['center']
            self.length = self.scale.shape[0]
        except KeyError:
            pass

        # Get gt SMPL parameters, if available.
        # NOTE: np.float was removed in NumPy 1.24; np.float64 is exactly the
        # dtype the old alias produced, so behavior is unchanged.
        try:
            self.pose = self.data['pose'].astype(np.float64)
            self.betas = self.data['shape'].astype(np.float64)
            self.trans = self.data['trans'].astype(np.float64)
            self.has_smpl = 1
        except KeyError:
            self.has_smpl = 0

        # Get gt 3D pose, if available
        try:
            self.pose_3d = self.data['S']
            self.has_pose_3d = 1
        except KeyError:
            self.has_pose_3d = 0

        # Get 2D keypoints (zeros with zero confidence when absent)
        try:
            self.keypoints = self.data['part']
        except KeyError:
            self.keypoints = np.zeros((len(self.imgname), 24, 3))

        # Get 3D mesh vertex, if available
        try:
            self.mesh_vertex = self.data['human_vertex']
            self.has_vertex = 1
        except KeyError:
            self.has_vertex = 0

        # Get human point cloud, if available
        try:
            self.human_points = self.data['point_clouds']
            self.has_human_points = 1
        except KeyError:
            self.has_human_points = 0

        # Get timestamp, if available
        try:
            self.timestamp = self.data['timestamp']
            self.has_timestamp = 1
        except KeyError:
            self.has_timestamp = 0

    def augm_params(self):
        """Sample augmentation parameters.

        Returns:
            flip (int): 1 to mirror the image/pose, else 0.
            pn (np.ndarray): per-channel pixel-noise multipliers, shape (3,).
            rot (float): in-plane rotation in degrees.
            sc (float): scale multiplier for the bounding box.

        At test time the deterministic identity ``(0, ones(3), 0, 1)`` is
        returned.
        """
        flip = 0  # flipping
        pn = np.ones(3)  # per channel pixel-noise
        rot = 0  # rotation
        sc = 1  # scaling
        if self.is_train:
            # We flip with probability 1/2
            if np.random.uniform() <= 0.5:
                flip = 1

            # Each channel is multiplied with a number
            # in the area [1-opt.noiseFactor,1+opt.noiseFactor]
            pn = np.random.uniform(1 - self.options.noise_factor,
                                   1 + self.options.noise_factor, 3)

            # The rotation is a number in the area [-2*rotFactor, 2*rotFactor]
            rot = min(2 * self.options.rot_factor,
                      max(-2 * self.options.rot_factor,
                          np.random.randn() * self.options.rot_factor))

            # The scale is multiplied with a number
            # in the area [1-scaleFactor,1+scaleFactor]
            sc = min(1 + self.options.scale_factor,
                     max(1 - self.options.scale_factor,
                         np.random.randn() * self.options.scale_factor + 1))
            # but the rotation is zeroed out with probability 3/5
            if np.random.uniform() <= 0.6:
                rot = 0

        return flip, pn, rot, sc

    def rgb_processing(self, rgb_img, center, scale, rot, flip, pn):
        """Crop/rotate/flip the rgb image, add pixel noise, and convert it
        to a CHW float array in [0, 1]."""
        rgb_img = crop(rgb_img, center, scale,
                       [self.options.img_res, self.options.img_res], rot=rot)
        # flip the image
        if flip:
            rgb_img = flip_img(rgb_img)
        # in the rgb image we add pixel noise in a channel-wise manner
        rgb_img[:, :, 0] = np.minimum(
            255.0, np.maximum(0.0, rgb_img[:, :, 0] * pn[0]))
        rgb_img[:, :, 1] = np.minimum(
            255.0, np.maximum(0.0, rgb_img[:, :, 1] * pn[1]))
        rgb_img[:, :, 2] = np.minimum(
            255.0, np.maximum(0.0, rgb_img[:, :, 2] * pn[2]))
        # (3,224,224),float,[0,1]
        rgb_img = np.transpose(rgb_img.astype('float32'), (2, 0, 1)) / 255.0
        return rgb_img

    def j2d_processing(self, kp, center, scale, r, f):
        """Process gt 2D keypoints and apply all augmentation transforms.

        ``kp`` has shape (nparts, 3); the last column (confidence) is left
        untouched by the normalization. Coordinates are mapped into
        [-1, 1] crop coordinates.
        """
        nparts = kp.shape[0]
        for i in range(nparts):
            kp[i, 0:2] = transform(kp[i, 0:2] + 1, center, scale,
                                   [self.options.img_res, self.options.img_res], rot=r)
        # convert to normalized coordinates
        kp[:, :-1] = 2. * kp[:, :-1] / self.options.img_res - 1.
        # flip the x coordinates
        if f:
            kp = flip_kp(kp)
        kp = kp.astype('float32')
        return kp

    def j3d_processing(self, S, r, f):
        """Process gt 3D keypoints (N, 3) and apply all augmentation
        transforms (in-plane rotation by ``r`` degrees, optional flip)."""
        # in-plane rotation
        rot_mat = np.eye(3)
        if not r == 0:
            rot_rad = -r * np.pi / 180
            sn, cs = np.sin(rot_rad), np.cos(rot_rad)
            rot_mat[0, :2] = [cs, -sn]
            rot_mat[1, :2] = [sn, cs]
        # apply rot_mat to every joint (row) of S
        S = np.einsum('ij,kj->ki', rot_mat, S)
        # flip the x coordinates
        if f:
            S = flip_kp(S)
        S = S.astype('float32')
        return S

    def pose_processing(self, pose, r, f):
        """Process SMPL theta parameters (72,) and apply all augmentation
        transforms (global-orient rotation, optional flip)."""
        # rotation of the global-orientation part of the pose parameters
        pose[:3] = rot_aa(pose[:3], r)
        # flip the pose parameters
        if f:
            pose = flip_pose(pose)
        # (72),float
        pose = pose.astype('float32')
        return pose

    def __getitem__(self, index):
        """Assemble the (optionally augmented) sample ``index`` as a dict."""
        # Snapshot the global RNG state so the per-sample seeding below does
        # not leak into the rest of the program.
        random_state = random.getstate()
        np_random_state = np.random.get_state()

        if self.remove_randomness and self.is_train:
            random.seed(index + self.epoch * len(self))
            np.random.seed(index + self.epoch * len(self))

        if self.remove_randomness and not self.is_train:
            random.seed(index)
            np.random.seed(index)

        item = {}
        has_scale = True
        try:
            scale = self.scale[index].copy()
            center = self.center[index].copy()
        except AttributeError:
            has_scale = False

        # Get augmentation parameters
        flip, pn, rot, sc = self.augm_params()

        # Load image
        if self.use_image:
            imgname = self.imgname[index]
            img = cv2.imread(imgname)
            if img is None:
                # cv2.imread silently returns None for a missing/corrupt
                # file; fail loudly with the offending path instead of the
                # obscure TypeError/NameError it used to trigger.
                raise FileNotFoundError(
                    'cannot read image: {}'.format(imgname))
            img = img[:, :, ::-1].copy().astype(np.float32)  # BGR -> RGB

        # Get SMPL parameters, if available
        if self.has_smpl:
            pose = self.pose[index].copy()
            betas = self.betas[index].copy()
            trans = self.trans[index].copy()
        else:
            pose = np.zeros(72)
            betas = np.zeros(10)
            trans = np.zeros(3)

        if self.use_image:
            # Process image (requires center/scale from the annotation file)
            img = self.rgb_processing(img, center, sc * scale, rot, flip, pn)
            img = torch.from_numpy(img).float()
            # Store image before normalization to use it in visualization
            item['img_orig'] = img.clone()
            item['img'] = self.normalize_img(img)
            item['imgname'] = imgname

        # Get 3D pose, if available. Fall back to zeros so item['pose_3d']
        # always exists and augmentation never hits an undefined S
        # (previously: NameError when aug and no 3D pose, missing key when
        # 3D pose present but aug disabled).
        if self.has_pose_3d:
            S = self.pose_3d[index].copy()
        else:
            S = np.zeros((24, 4), dtype=np.float32)

        # Get 2D keypoints and apply augmentation transforms
        keypoints = self.keypoints[index].copy()

        # Get 3D mesh vertex
        if self.has_vertex:
            human_vertex = self.mesh_vertex[index].copy()
            item['human_vertex'] = torch.from_numpy(human_vertex).float()

        # Get 3D human points
        if self.has_human_points:
            item['human_points'] = torch.from_numpy(
                self.human_points[index]).float()
            item['points_num'] = item['human_points'].size(0)

        if self.aug:
            item['pose'] = torch.from_numpy(
                self.pose_processing(pose, rot, flip)).float()
            item['betas'] = torch.from_numpy(betas).float()
            item['trans'] = torch.from_numpy(trans).float()
            # rotate/flip the 3D joints; the last column (confidence) is
            # preserved as-is
            St = self.j3d_processing(S.copy()[:, :-1], rot, flip)
            S[:, :-1] = St
            item['pose_3d'] = torch.from_numpy(S).float()
            item['keypoints'] = torch.from_numpy(self.j2d_processing(
                keypoints, center, sc * scale, rot, flip)).float()
        else:
            item['pose'] = torch.from_numpy(pose).float()
            item['betas'] = torch.from_numpy(betas).float()
            item['trans'] = torch.from_numpy(trans).float()
            item['pose_3d'] = torch.from_numpy(S).float()
            item['keypoints'] = torch.from_numpy(keypoints).float()

        item['img_name'] = self.imgname[index]
        item['has_smpl'] = self.has_smpl
        item['has_pose_3d'] = self.has_pose_3d
        item['has_vertex'] = self.has_vertex
        item['has_human_points'] = self.has_human_points
        item['has_timestamp'] = self.has_timestamp
        if has_scale:
            item['scale'] = float(sc * scale)
            item['center'] = center.astype(np.float32)

        # Restore the global RNG state snapshotted at the top.
        if self.remove_randomness:
            random.setstate(random_state)
            np.random.set_state(np_random_state)
        return item

    def __len__(self):
        """Number of samples (one per image name)."""
        return len(self.imgname)


def pc_normalize(pc, scale=1.2):
    """Center and scale a batch of point-cloud frames **in place**.

    Args:
        pc: array of shape (T, N, 3) — T frames of N points each. The array
            is mutated in place and also returned.
        scale: divisor applied after centering (default 1.2, the value used
            throughout this project).

    Returns:
        The same array: x/y centered per frame, z centered globally, all
        coordinates divided by ``scale``.
    """
    # x/y are centered per frame (mean over the point axis, kept for broadcast)
    pc[..., 0:2] -= np.mean(pc[..., 0:2], axis=1, keepdims=True)
    # z is centered with a single global mean over all frames
    pc[..., 2] -= np.mean(pc[..., 2])
    pc /= scale
    return pc


def augment(points, points_num):  # (T, N, 3), (T, )
    """Randomly augment a window of point-cloud frames.

    Args:
        points: array of shape (T, N, 3); not modified (a copy is taken).
        points_num: per-frame count of valid points, shape (T,).

    Returns:
        A new (T, N, 3) array with random scaling, random point dropout and
        clipped Gaussian jitter applied.
    """
    T, N = points.shape[:2]
    augmented_points = points.copy()

    # random global scaling in [0.9, 1.1]
    scale = np.random.uniform(0.9, 1.1)
    augmented_points *= scale

    # random dropout, keeping at least 50 points in the smallest frame.
    # NOTE: the original code wrote np.clip(0, ratio, 0.5) — value and lower
    # bound swapped — which only worked by coincidence; this is the intended
    # (and equivalent) form.
    dropout_ratio = np.clip(
        np.random.random() * (1 - 50 / np.min(points_num)), 0, 0.5)
    drop_idx = np.where(np.random.random((T, N)) <= dropout_ratio)
    # dropped points collapse onto the first point of the first frame
    augmented_points[drop_idx] = augmented_points[0][0]

    # clipped Gaussian jitter
    jittered_points = np.clip(
        0.01 * np.random.randn(*augmented_points.shape), -0.05, 0.05)
    augmented_points += jittered_points

    return augmented_points


class TemporalDataset(Dataset):
    """Temporal (sequence) dataset backed by an hdf5 (or npz) annotation file.

    Each sample is a window of point-cloud frames with SMPL
    pose/shape/translation ground truth and, optionally, per-frame depth
    images.
    """

    def __init__(self, dataset, is_train=True, remove_randomness=True, is_visual=False, need_depth=False):
        """
        Args:
            dataset: dataset name used to build the hdf5 file path.
            is_train: selects the train/test file and enables augmentation.
            remove_randomness: kept for interface parity (seeding currently
                disabled in __getitem__).
            is_visual: load the visualization file instead of train/test.
            need_depth: when True, load and normalize depth images too.
        """
        super().__init__()
        self.dataset = dataset
        self.is_train = is_train
        self.remove_randomness = remove_randomness

        if is_visual:
            data_file = '/home/ljl/dataset/{}.hdf5'.format(dataset)
        else:
            data_file = '/home/ljl/dataset/{}_{}.hdf5'.format(
                dataset, 'train' if is_train else 'test')
        # splitext keeps the leading dot ('.hdf5'); strip it so the
        # comparisons below and the close in __del__ actually match
        # (previously '.hdf5' == 'hdf5' was always False and the h5py file
        # handle was never closed).
        self.ext = os.path.splitext(data_file)[1].lstrip('.')
        if self.ext == 'npz':
            self.data = np.load(data_file, allow_pickle=True)
        else:
            self.data = h5py.File(data_file, 'r')
        self.pose = self.data['pose']
        self.betas = self.data['shape']
        self.trans = self.data['trans']
        self.human_points = self.data['point_clouds']
        self.points_num = self.data['points_num']
        if need_depth:
            self.depth_filenames = self.data['depth']
        # epoch is expected to be bumped externally (see threaded_loader usage)
        self.epoch = -1
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        self.need_depth = need_depth

    def __del__(self):
        # getattr guards: __init__ may have failed before self.ext/self.data
        # were assigned, and __del__ must not raise.
        if getattr(self, 'ext', None) == 'hdf5' and getattr(self, 'data', None) is not None:
            self.data.close()

    @staticmethod
    def pad_and_scale(image):
        """Pad *image* (H, W, 3) to a square with a fixed fill color
        (RGB = 150, 200, 150), then resize it to 224x224."""
        h, w, _ = image.shape
        l = max(h, w)
        pad_h = l - h
        pad_w = l - w
        top = pad_h // 2
        bottom = pad_h - pad_h // 2
        left = pad_w // 2
        right = pad_w - pad_w // 2
        # each channel is padded with its own constant value
        R = np.expand_dims(np.pad(
            image[..., 0], ((top, bottom), (left, right)), constant_values=150), axis=-1)
        G = np.expand_dims(np.pad(
            image[..., 1], ((top, bottom), (left, right)), constant_values=200), axis=-1)
        B = np.expand_dims(np.pad(
            image[..., 2], ((top, bottom), (left, right)), constant_values=150), axis=-1)
        image = np.concatenate((R, G, B), axis=-1)
        image = resize(image, (224, 224))
        return image

    def __getitem__(self, index):
        """Return the normalized (and, in training, augmented) sample."""
        item = {}
        item['index'] = index

        points = pc_normalize(self.human_points[index].copy())
        item['human_points'] = torch.from_numpy(points).float()
        item['pose'] = torch.from_numpy(self.pose[index]).float()

        if self.is_train:
            # training windows get random scale/dropout/jitter augmentation
            points_num = self.points_num[index]
            augmented_points = augment(points, points_num)
            item['human_points'] = torch.from_numpy(
                augmented_points).float()
        item['points_num'] = torch.from_numpy(self.points_num[index]).int()
        item['betas'] = torch.from_numpy(self.betas[index]).float()
        item['trans'] = torch.from_numpy(self.trans[index]).float()

        if self.need_depth:
            filenames = self.depth_filenames[index]
            # filenames are stored as bytes with a legacy path prefix
            filenames = [filename.decode().replace(
                'xmu_gait', 'home/ljl/dataset') for filename in filenames]
            raw_images = [io.imread(filename) for filename in filenames]
            depths = [self.transform(self.pad_and_scale(raw_image))
                      for raw_image in raw_images]
            item['depths'] = torch.stack(depths).float()

        return item

    def __len__(self):
        """Number of sequence windows in the annotation file."""
        return self.pose.shape[0]


def threaded_loader(dataset, threads, batch_size=1, shuffle=True, drop_last=True):
    """Build a multi-worker pytorch DataLoader around *dataset*.

    Parameters
    ----------
    dataset : Dataset
        object[i] returns the i-th training example.

    threads : int
        number of worker processes (``num_workers``).

    batch_size : int
        samples per batch.

    shuffle : bool
        reshuffle the data at every epoch.

    drop_last : bool
        drop the final incomplete batch.

    Returns
    -------
        a multi-worker pytorch loader that batches with the module-level
        ``collate``.
    """
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        sampler=None,
        num_workers=threads,
        pin_memory=True,
        collate_fn=collate,
        drop_last=drop_last)


# noinspection PyProtectedMember
def collate(batch, _use_shared_memory=True):
    """Stack the samples in *batch* into tensors with an outer batch
    dimension, recursing into dicts and sequences.

    Adapted from https://github.com/pytorch in torch/utils/data/_utils/collate.py
    """
    import re

    error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
    first = batch[0]
    first_type = type(first)

    if isinstance(first, torch.Tensor):
        out = None
        if _use_shared_memory:
            # Inside a DataLoader worker process, stacking directly into a
            # shared-memory tensor avoids an extra copy.
            total = sum(x.numel() for x in batch)
            shared = first.storage()._new_shared(total)
            out = first.new(shared)
        return torch.stack(batch, 0, out=out)

    if first_type.__module__ == 'numpy' and first_type.__name__ not in ('str_', 'string_'):
        assert first_type.__name__ == 'ndarray'
        # reject arrays of strings / objects
        if re.search('[SaUO]', first.dtype.str) is not None:
            raise TypeError(error_msg.format(first.dtype))
        tensors = [torch.from_numpy(b) for b in batch]
        try:
            return torch.stack(tensors, 0)
        except RuntimeError:
            # ragged shapes: fall back to a plain list of tensors
            return tensors

    if first is None:
        return list(batch)
    if isinstance(first, int):
        return torch.LongTensor(batch)
    if isinstance(first, float):
        return torch.DoubleTensor(batch)
    if isinstance(first, str):
        return batch
    if isinstance(first, dict):
        return {key: collate([sample[key] for sample in batch]) for key in first}
    if isinstance(first, (tuple, list)):
        return [collate(group) for group in zip(*batch)]

    raise TypeError(error_msg.format(type(first)))


def tensor2img(tensor, model=None):
    """Convert a CHW torch/numpy tensor with values in [0, 1] back to a
    PIL Image (undoing the ToTensor() transform).

    The ``model`` argument is unused and kept only for interface
    compatibility with existing callers.
    """
    if isinstance(tensor, torch.Tensor):
        tensor = tensor.detach().cpu().numpy()

    # CHW -> HWC, rescale to [0, 255] and quantize
    hwc = np.transpose(tensor, (1, 2, 0))
    pixels = np.clip(hwc * 255, 0, 255).astype(np.uint8)
    from PIL import Image
    return Image.fromarray(pixels)


if __name__ == '__main__':
    # Smoke test: build a loader, pull a single batch, and report the
    # point-cloud tensor shape.
    seq_dataset = TemporalDataset('lidarcap_seq')
    batch_loader = threaded_loader(
        seq_dataset, threads=4, batch_size=8, drop_last=False)
    batch_loader.dataset.epoch += 1
    for batch in batch_loader:
        print(batch['human_points'].shape)
        exit()
