import numpy as np
import warnings
import os
from torch.utils.data import Dataset
import torch

warnings.filterwarnings('ignore')


def pc_normalize(pc):
    """Center a point cloud at the origin and scale it into the unit sphere.

    Args:
        pc (np.ndarray): point cloud of shape [N, D] (normalized over all D columns).

    Returns:
        np.ndarray: the centered, unit-scaled cloud.
    """
    centered = pc - pc.mean(axis=0)
    # Radius of the farthest point from the centroid.
    radius = np.sqrt((centered ** 2).sum(axis=1)).max()
    return centered / radius


def farthest_point_sample(point, npoint):
    """Greedy farthest point sampling.

    Repeatedly picks the point farthest (in xyz space) from everything
    chosen so far, starting from a random seed point.

    Input:
        point: pointcloud data, [N, D] (only the first 3 columns are used
            for distances; extra columns such as normals are carried along)
        npoint: number of samples
    Return:
        sampled pointcloud, [npoint, D]

    Note: the seed index comes from the global NumPy RNG, so results
    depend on the RNG state.
    """
    n = point.shape[0]
    coords = point[:, :3]
    chosen = np.zeros((npoint,), dtype=np.int32)
    # Distance from each point to its nearest already-chosen point.
    best_dist = np.ones((n,)) * 1e10
    current = np.random.randint(0, n)
    for step in range(npoint):
        chosen[step] = current
        sq_dist = np.sum((coords - coords[current, :]) ** 2, -1)
        closer = sq_dist < best_dist
        best_dist[closer] = sq_dist[closer]
        # Next pick: the point farthest from the chosen set.
        current = np.argmax(best_dist, -1)
    return point[chosen]


def random_sample_points_np(pc, npoints):
    """Randomly sample ``npoints`` points from a point cloud.

    Args:
        pc (np.ndarray): point cloud of shape [N, D].
        npoints (int): number of points to draw.

    Returns:
        np.ndarray: sampled points, [npoints, D].

    Samples without replacement when the cloud has at least ``npoints``
    points. For smaller clouds it falls back to sampling with replacement
    (the previous behavior was to raise ValueError from np.random.choice).
    """
    # replace=False would raise when npoints > N; only allow duplicates then.
    replace = pc.shape[0] < npoints
    idx = np.random.choice(pc.shape[0], npoints, replace=replace)
    return pc[idx]


class ModelNetDataLoader(Dataset):
    def __init__(self, root, npoint=1024, split='train', normalize=True, augmentation=None, uniform=False, normal_channel=True, cache_size=15000):
        """
        Data loader for the ModelNet dataset stored as pre-packed .npy files.

        Args:
            root (str): Root directory containing "<split>_data.npy" and
                "<split>_labels.npy".
            npoint (int): Number of points per sample.
            split (str): 'train' or 'test'.
            normalize (bool): Whether to center each cloud and scale it
                into the unit sphere (done once, up front).
            augmentation (bool | None): Defaults to True for the train
                split. NOTE(review): the flag is stored but no augmentation
                is applied anywhere in this class.
            uniform (bool): Use farthest point sampling instead of random
                sampling when subsampling to ``npoint`` points.
            normal_channel (bool): Keep columns beyond xyz (e.g. normals).
            cache_size (int): Unused; kept for backward compatibility.

        Raises:
            ValueError: if ``split`` is not 'train' or 'test'.
        """
        # Validate with an exception rather than assert (asserts vanish under -O).
        if split not in ('train', 'test'):
            raise ValueError(f"split must be 'train' or 'test', got {split!r}")
        self.root = root
        self.npoints = npoint
        self.normalize = normalize
        self.split = split
        self.uniform = uniform
        self.normal_channel = normal_channel
        # Default: augment only on the training split.
        self.augmentation = (split == 'train') if augmentation is None else augmentation

        self.data_file = os.path.join(self.root, f"{split}_data.npy")
        self.labels_file = os.path.join(self.root, f"{split}_labels.npy")
        self.data = np.load(self.data_file)
        self.labels = np.load(self.labels_file)

        # Normalize every cloud once here so __getitem__ stays cheap.
        if self.normalize:
            self.data = np.array([pc_normalize(pc) for pc in self.data])

    def __len__(self):
        """Number of samples in the dataset."""
        return len(self.data)

    def _get_item(self, index):
        """
        Fetches a single sample from the dataset.

        Args:
            index (int): Index of the sample.

        Returns:
            tuple: (point cloud [npoints, C] float32 tensor, label long tensor)
        """
        point_set = self.data[index]
        label = self.labels[index]

        # If normal_channel is False, keep only the xyz columns.
        if not self.normal_channel:
            point_set = point_set[:, :3]

        # Subsample to self.npoints: farthest point sampling or random choice.
        if self.uniform:
            point_set = farthest_point_sample(point_set, self.npoints)
        else:
            point_set = random_sample_points_np(point_set, self.npoints)

        point_set = torch.tensor(point_set, dtype=torch.float32)
        label = torch.tensor(label, dtype=torch.long)

        return point_set, label

    def __getitem__(self, index):
        return self._get_item(index)


class ModelNetDataLoaderHybrid(ModelNetDataLoader):
    def __init__(self, root, def_root, npoint=1024, split='train', normalize=True, augmentation=None, uniform=False, normal_channel=True, cache_size=15000):
        """
        Hybrid data loader that combines original ModelNet data with
        low-frequency ("defense") point cloud data for hybrid training.

        Args:
            root (str): Root directory containing original data.
            def_root (str): Directory containing "def_data.npy" and
                "def_labels.npy".
            npoint (int): Number of points per sample.
            split (str): 'train' or 'test'.
            normalize (bool): Whether to normalize the original data
                (defense data is always normalized below).
            augmentation (bool | None): Accepted for signature
                compatibility; see the note at the end of __init__.
            uniform (bool): Whether to use farthest point sampling.
            normal_channel (bool): Whether to keep normal channels.
            cache_size (int): Unused; kept for backward compatibility.

        Raises:
            ValueError: if per-sample shapes of the original and defense
                data do not match.
        """
        super().__init__(root, npoint, split, normalize, augmentation, uniform, normal_channel, cache_size)

        self.def_data_file = os.path.join(def_root, "def_data.npy")
        self.def_labels_file = os.path.join(def_root, "def_labels.npy")
        self.def_data = np.load(self.def_data_file)
        self.def_labels = np.load(self.def_labels_file)

        # Defense data is always normalized, regardless of `normalize`.
        self.def_data = np.array([pc_normalize(pc) for pc in self.def_data])

        print(f"Original data length: {len(self.data)}")
        print(f"Defense data length: {len(self.def_data)}")

        # Both sources must agree on per-sample shape before concatenation.
        if self.data.shape[1:] != self.def_data.shape[1:]:
            # Raise instead of exit(-1): library code should not kill the
            # process; callers can catch and handle this.
            raise ValueError(
                f"Shape mismatch! Original: {self.data.shape}, Defense: {self.def_data.shape}"
            )

        # For training, mix defense samples into the original set, then
        # shuffle so the two sources are interleaved.
        if split == 'train':
            self.data = np.concatenate([self.data, self.def_data], axis=0)
            self.labels = np.concatenate([self.labels, self.def_labels], axis=0)
            idx = np.random.permutation(len(self.labels))
            self.data = self.data[idx]
            self.labels = self.labels[idx]

        # Aliases kept for callers that use these attribute names.
        self.num_points = npoint
        self.partition = split
        self.normal_channel = normal_channel
        # NOTE(review): this overrides the parent's augmentation flag and
        # ignores the explicit `augmentation` argument — preserved as-is.
        self.augmentation = (split == 'train')

    def __getitem__(self, index):
        """
        Fetches a single sample from the combined dataset.

        Args:
            index (int): Index of the sample.

        Returns:
            tuple: (point cloud, label)
        """
        return self._get_item(index)


if __name__ == '__main__':
    import torch

    # Smoke test: build the hybrid dataset from local paths and iterate
    # through it with a torch DataLoader.
    root = '/media/shangli211/4TB_SSD/program_file/Data/'
    def_root = '/media/shangli211/4TB_SSD/program_file/Data/def_data'

    data = ModelNetDataLoaderHybrid(root, def_root, npoint=1024, uniform=False, normal_channel=True)
    DataLoader = torch.utils.data.DataLoader(data, batch_size=6, shuffle=True)
    all_labels = []

    for i, (point, label) in enumerate(DataLoader):
        # Check and convert the data types
        point = point.float()
        label = label.long()
