import math
import numbers
import os
import random

import numpy as np
import pytorch3d.ops
import torch
from torch.utils.data import Dataset
from torchvision.transforms import Compose
from tqdm.auto import tqdm

def points_to_voxel(points, voxel_shape, voxel_size): # (0.15, 0.15, 0.15)
    """Shift points into a voxel grid and drop those that fall outside it.

    The grid is centered on the origin along y/z but starts at x = 0
    (everything behind the camera plane is discarded first).

    Args:
        points: (N, 3) xyz coordinates; axis 0 is camera-forward.
        voxel_shape: (3,) integer grid dimensions.
        voxel_size: (3,) voxel edge lengths per axis.
    Returns:
        (shifted, indices): points translated so the grid's lower corner is at
        the origin, and their per-axis voxel indices (float, from np.floor);
        both arrays contain only the in-grid points.
    """
    in_front = points[:, 0] >= 0
    points = points[in_front]
    # Lower corner of the grid: -shape/2 * size on y/z, 0 on x.
    lower = (-voxel_shape) // 2 * voxel_size
    lower[0] = 0.0
    shifted = points - lower
    indices = np.floor(shifted / voxel_size)

    inside = (indices >= 0).all(axis=1) & (indices < voxel_shape).all(axis=1)

    # assert shifted[inside].shape[0] > 100
    return shifted[inside], indices[inside]

def load_points(points, voxel_shape, voxel_size):
    """Reduce a raw (N, >=3) point array to xyz and keep only in-grid points.

    Returns the shifted points from points_to_voxel; the voxel indices are
    discarded here.
    """
    xyz = points[:, :3]
    shifted, _ = points_to_voxel(xyz, voxel_shape, voxel_size=voxel_size)
    return shifted

class NormalizeUnitSphere(object):
    """Center a point cloud at its bounding-box midpoint and scale it into the unit sphere."""

    @staticmethod
    def normalize(pcl, center=None, scale=None):
        """Normalize *pcl* to the unit sphere.

        Args:
            pcl: (N, 3) point cloud, numpy array or torch tensor.
            center: optional (1, 3) precomputed center; bounding-box midpoint when None.
            scale: optional (1, 1) precomputed scale; max centered point norm when None.
        Returns:
            (pcl, center, scale). pcl matches the input container (numpy in ->
            numpy out); center and scale are always torch tensors.
        """
        came_from_numpy = isinstance(pcl, np.ndarray)
        if came_from_numpy:
            pcl = torch.from_numpy(pcl).float()
        if center is None:
            bb_max = pcl.max(dim=0, keepdim=True)[0]
            bb_min = pcl.min(dim=0, keepdim=True)[0]
            center = (bb_max + bb_min) / 2  # bounding-box midpoint, (1, 3)
        pcl = pcl - center
        if scale is None:
            dists = (pcl**2).sum(dim=1, keepdim=True).sqrt()  # per-point norms, (N, 1)
            scale = dists.max(dim=0, keepdim=True)[0]  # (1, 1)
        pcl = pcl / scale
        if came_from_numpy:
            pcl = pcl.numpy()
        return pcl, center, scale

# class RandomRotate(object):
#     def __init__(self, degrees=180.0, axis=0):
#         if isinstance(degrees, numbers.Number):
#             degrees = (-abs(degrees), abs(degrees))
#         assert isinstance(degrees, (tuple, list)) and len(degrees) == 2
#         self.degrees = degrees
#         self.axis = axis

#     def __call__(self, data):
#         degree = math.pi * random.uniform(*self.degrees) / 180.0
#         sin, cos = math.sin(degree), math.cos(degree)

#         if self.axis == 0:
#             matrix = [[1, 0, 0], [0, cos, sin], [0, -sin, cos]]
#         elif self.axis == 1:
#             matrix = [[cos, 0, -sin], [0, 1, 0], [sin, 0, cos]]
#         else:
#             matrix = [[cos, sin, 0], [-sin, cos, 0], [0, 0, 1]]
#         matrix = torch.tensor(matrix)

#         data["pcl_clean"] = torch.matmul(data["pcl_clean"], matrix)
#         if "pcl_noisy" in data:
#             data["pcl_noisy"] = torch.matmul(data["pcl_noisy"], matrix)

#         return data

# def standard_train_transforms(noise_std_min, noise_std_max, scale_d=0.2, rotate=True):
#     transforms = [
#         # NormalizeUnitSphere(),
#         # AddNoise(noise_std_min=noise_std_min, noise_std_max=noise_std_max),
#         # RandomScale([1.0 - scale_d, 1.0 + scale_d]),
#     ]
#     if rotate:
#         transforms += [
#             RandomRotate(axis=0),
#             RandomRotate(axis=1),
#             RandomRotate(axis=2),
#         ]
#     return Compose(transforms)

# taken from [ScoreDenoise]
# taken from [ScoreDenoise]
class PointCloudDataset(Dataset):
    """In-memory dataset of (clean ground truth, noisy reconstruction) point-cloud pairs.

    Scans *recon_root* for .npy frames, keeps every frame_down-th frame, then
    loads the matching clean cloud from *data_root* and the reconstruction
    from *recon_root*. The reconstruction defines the unit-sphere
    normalization, which is reused for the clean cloud so the pair stays
    aligned.
    """

    def __init__(self, data_root, recon_root, transform=None, voxel_shape=(30, 30, 15), voxel_size=(0.15, 0.15, 0.15), frame_down=50):
        super().__init__()
        if not isinstance(voxel_shape, np.ndarray):
            voxel_shape = np.array(voxel_shape, dtype=np.int32)
        if not isinstance(voxel_size, np.ndarray):
            voxel_size = np.array(voxel_size, dtype=np.float32)
        assert len(voxel_shape) == len(voxel_size) == 3
        self._voxel_shape = voxel_shape
        self._voxel_size = voxel_size
        self._data_root = data_root
        self._recon_root = recon_root

        # Collect relative paths "<dir>/<frame>.npy", downsampled by frame_down.
        self._point_list = []
        for data_dir in tqdm(os.listdir(recon_root), desc="Processing directories"):
            for fname in os.listdir(os.path.join(recon_root, data_dir)):
                if not fname.endswith(".npy"):
                    continue
                if int(fname.split(".")[0]) % frame_down != 0:
                    continue
                self._point_list.append(os.path.join(data_dir, fname))
            # NOTE(review): stops after the first directory — looks like a
            # debugging leftover; confirm before removing.
            break
        self._point_list = sorted(self._point_list)

        self.transform = transform
        self.pointclouds = []
        self.recons = []
        self.pointcloud_names = []
        normalizer = NormalizeUnitSphere()
        for rel_path in tqdm(self._point_list, desc="Loading"):
            clean_path = os.path.join(self._data_root, rel_path)
            if not os.path.exists(clean_path):
                raise FileNotFoundError("File not found: %s" % clean_path)
            p_full = load_points(np.load(clean_path), self._voxel_shape, self._voxel_size)

            noisy_path = os.path.join(self._recon_root, rel_path)
            if not os.path.exists(noisy_path):
                raise FileNotFoundError("File not found: %s" % noisy_path)
            p_part = np.load(noisy_path)

            # The noisy cloud defines the normalization frame; GT reuses it.
            p_part, center, scale = normalizer.normalize(p_part)
            p_full, _, _ = normalizer.normalize(p_full, center=center, scale=scale)

            self.pointclouds.append(torch.FloatTensor(p_full.astype(np.float32)))
            self.recons.append(torch.FloatTensor(p_part.astype(np.float32)))
            self.pointcloud_names.append("NULL")

    def __len__(self):
        return len(self.pointclouds)

    def __getitem__(self, idx):
        # Clones so downstream transforms cannot mutate the cached clouds.
        return {
            "pcl_clean": self.pointclouds[idx].clone(),
            "name": self.pointcloud_names[idx],
            "pcl_noisy": self.recons[idx].clone(),
        }

def get_dataset(
    data_root,
    recon_root,
    patch_size=2048
):
    """Build the paired-patch training dataset.

    Wraps a single PointCloudDataset (fixed 512x512x64 voxel grid, every
    frame kept) in an on-the-fly PairedPatchDataset that yields patches of
    *patch_size* points.
    """
    base = PointCloudDataset(
        data_root=data_root,
        recon_root=recon_root,
        transform=None,
        voxel_shape=[512, 512, 64],
        voxel_size=[0.15625, 0.15625, 0.140625],
        frame_down=1,
    )
    return PairedPatchDataset(
        datasets=[base],
        patch_size=patch_size,
        patch_ratio=1.0,
        on_the_fly=True,
    )

def make_patches_for_pcl_pair(pcl_A, pcl_B, patch_size, num_patches, ratio):
    """Extract aligned kNN patches around shared seed points from two clouds.

    Args:
        pcl_A:  The first point cloud, (N, 3); patch seeds are drawn from it.
        pcl_B:  The second point cloud, (rN, 3).
        patch_size:   Patch size M (points per pcl_A patch).
        num_patches:  Number of patches P.
        ratio:    Ratio r; each pcl_B patch has int(r * M) points.
    Returns:
        (P, M, 3), (P, rM, 3)
    """
    seed_idx = torch.randperm(pcl_A.size(0))[:num_patches]  # (P, )
    seeds = pcl_A[seed_idx].unsqueeze(0)  # (1, P, 3)
    _, _, pat_A = pytorch3d.ops.knn_points(
        seeds, pcl_A.unsqueeze(0), K=patch_size, return_nn=True, return_sorted=False
    )
    _, _, pat_B = pytorch3d.ops.knn_points(
        seeds, pcl_B.unsqueeze(0), K=int(ratio * patch_size), return_nn=True, return_sorted=False
    )
    # Drop the singleton batch dimension from both neighbor tensors.
    return pat_A[0], pat_B[0]


class PairedPatchDataset(Dataset):
    """Serves (noisy, clean) patch pairs drawn from one or more point-cloud datasets.

    With on_the_fly=True a fresh patch pair is sampled per __getitem__ from a
    randomly chosen dataset; otherwise all patches are precomputed once by
    make_patches(). Items are returned as {"noisy_points", "clean_points"}.
    """

    def __init__(self, datasets, patch_ratio, on_the_fly=True, patch_size=1000, num_patches=1000, transform=None):
        super().__init__()
        self.datasets = datasets
        self.len_datasets = sum(len(dset) for dset in datasets)
        self.patch_ratio = patch_ratio
        self.patch_size = patch_size
        self.num_patches = num_patches
        self.on_the_fly = on_the_fly
        self.transform = transform
        self.patches = []
        if not on_the_fly:
            # Eagerly materialize every patch pair up front.
            self.make_patches()

    def make_patches(self):
        """Precompute num_patches patch pairs for every cloud of every dataset."""
        for dataset in tqdm(self.datasets, desc="MakePatch"):
            for sample in tqdm(dataset):
                pat_noisy, pat_clean = make_patches_for_pcl_pair(
                    sample["pcl_noisy"],
                    sample["pcl_clean"],
                    patch_size=self.patch_size,
                    num_patches=self.num_patches,
                    ratio=self.patch_ratio,
                )  # (P, M, 3), (P, rM, 3)
                # One (noisy, clean) tuple per patch index.
                self.patches.extend(zip(pat_noisy, pat_clean))

    def __len__(self):
        if self.on_the_fly:
            return self.len_datasets * self.num_patches
        return len(self.patches)

    def __getitem__(self, idx):
        if self.on_the_fly:
            # Dataset choice is random; idx only selects the cloud within it.
            source = random.choice(self.datasets)
            sample = source[idx % len(source)]
            pat_noisy, pat_clean = make_patches_for_pcl_pair(
                sample["pcl_noisy"],
                sample["pcl_clean"],
                patch_size=self.patch_size,
                num_patches=1,
                ratio=self.patch_ratio,
            )
            data = {"pcl_noisy": pat_noisy[0], "pcl_clean": pat_clean[0]}
        else:
            noisy, clean = self.patches[idx]
            data = {"pcl_noisy": noisy.clone(), "pcl_clean": clean.clone()}

        if self.transform is not None:
            data = self.transform(data)

        return {
            "noisy_points": data["pcl_noisy"],
            "clean_points": data["pcl_clean"],
        }
