import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import os

from pc_utils import (rotate_point_cloud, PointcloudScaleAndTranslate)
import rs_cnn.data.data_utils as rscnn_d_utils
from rs_cnn.data.ModelNet40Loader import ModelNet40Cls as rscnn_ModelNet40Cls
#import PCT_Pytorch.pointnet2_ops_lib.pointnet2_ops.pointnet2_utils as pointnet2_utils
#from pointnet2_tf.modelnet_h5_dataset import ModelNetH5Dataset as pointnet2_ModelNetH5Dataset
from dgcnn.pytorch.data import ModelNet40 as dgcnn_ModelNet40
from pathlib import Path
import random
from glob import glob
from pointcloudc_utils import PointCloudC as dgcnn_ModelNetC


class ModelNetC(Dataset):
    """Thin wrapper around the DGCNN ModelNet-C dataset.

    Yields {'pc': <point cloud>, 'label': <int>} dicts so downstream code
    can consume a uniform sample format.
    """

    def __init__(self, split):
        # All loading is delegated to the underlying DGCNN ModelNet-C dataset.
        self.dataset = dgcnn_ModelNetC(split=split)

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        pc, label = self.dataset[idx]
        return {'pc': pc, 'label': label.item()}



# distilled from the following sources:
# https://github.com/Yochengliu/Relation-Shape-CNN/blob/master/data/ModelNet40Loader.py
# https://github.com/Yochengliu/Relation-Shape-CNN/blob/master/train_cls.py
class ModelNet40Rscnn(Dataset):
    """ModelNet40 served through the Relation-Shape-CNN dataset code.

    __getitem__ yields {'pc': point array, 'label': int}; per-batch
    subsampling/augmentation is done on-device in `batch_proc`.
    """

    def __init__(self, split, data_path, train_data_path,
                 valid_data_path, test_data_path, num_points):

        self.split = split
        self.num_points = num_points
        _transforms = transforms.Compose([rscnn_d_utils.PointcloudToTensor()])
        rscnn_params = {
            'num_points': 1024,  # although it does not matter
            'root': data_path,
            'transforms': _transforms,
            # the "valid" split is loaded in training mode by the RSCNN loader
            'train': (split in ["train", "valid"]),
            'data_file': {
                'train': train_data_path,
                'valid': valid_data_path,
                'test':  test_data_path
            }[self.split]
        }
        self.rscnn_dataset = rscnn_ModelNet40Cls(**rscnn_params)
        self.PointcloudScaleAndTranslate = PointcloudScaleAndTranslate()

    def __len__(self):
        return self.rscnn_dataset.__len__()

    def __getitem__(self, idx):
        point, label = self.rscnn_dataset.__getitem__(idx)
        # for compatibility with the overall code
        point = np.array(point)
        label = label[0].item()

        return {'pc': point, 'label': label}

    def batch_proc(self, data_batch, device):
        # Per-batch processing on `device`: farthest-point subsample to
        # `self.num_points`, plus (train only) scale/translate augmentation.
        # NOTE(review): `pointnet2_utils` is undefined here -- its import is
        # commented out at the top of this file, so calling batch_proc will
        # raise NameError until that import is restored.
        point = data_batch['pc'].to(device)
        if self.split == "train":
            # (B, npoint)
            fps_idx = pointnet2_utils.furthest_point_sample(point, 1200)
            # randomly keep `num_points` of the 1200 FPS indices (no repeats)
            fps_idx = fps_idx[:, np.random.choice(1200, self.num_points,
                                                  False)]
            point = pointnet2_utils.gather_operation(
                point.transpose(1, 2).contiguous(),
                fps_idx).transpose(1, 2).contiguous()  # (B, N, 3)
            point.data = self.PointcloudScaleAndTranslate(point.data)
        else:
            fps_idx = pointnet2_utils.furthest_point_sample(
                point, self.num_points)  # (B, npoint)
            point = pointnet2_utils.gather_operation(
                point.transpose(1, 2).contiguous(),
                fps_idx).transpose(1, 2).contiguous()
        # to maintain compatibility
        point = point.cpu()
        return {'pc': point, 'label': data_batch['label']}


# distilled from the following sources:
# https://github.com/charlesq34/pointnet2/blob/7961e26e31d0ba5a72020635cee03aac5d0e754a/modelnet_h5_dataset.py
# https://github.com/charlesq34/pointnet2/blob/7961e26e31d0ba5a72020635cee03aac5d0e754a/train.py
class ModelNet40PN2(Dataset):
    """ModelNet40 served through the PointNet++ h5 dataset code.

    The whole split is materialized into memory at construction time by
    repeatedly pulling batches from the PointNet++ loader.

    NOTE(review): `pointnet2_ModelNetH5Dataset` is undefined -- its import is
    commented out at the top of this file, so constructing this class will
    raise NameError until that import is restored.
    """

    def __init__(self, split, train_data_path,
                 valid_data_path, test_data_path, num_points):
        self.split = split
        self.dataset_name = 'modelnet40_pn2'
        data_path = {
            "train": train_data_path,
            "valid": valid_data_path,
            "test":  test_data_path
        }[self.split]
        pointnet2_params = {
            'list_filename': data_path,
            # this has nothing to do with actual dataloader batch size
            'batch_size': 32,
            'npoints': num_points,
            'shuffle': False
        }

        # loading all the pointnet2data
        self._dataset = pointnet2_ModelNetH5Dataset(**pointnet2_params)
        all_pc = []
        all_label = []
        while self._dataset.has_next_batch():
            # augmentation here has nothing to do with actual data_augmentation
            pc, label = self._dataset.next_batch(augment=False)
            all_pc.append(pc)
            all_label.append(label)
        self.all_pc = np.concatenate(all_pc)
        self.all_label = np.concatenate(all_label)

    def __len__(self):
        return self.all_pc.shape[0]

    def __getitem__(self, idx):
        return {'pc': self.all_pc[idx], 'label': np.int64(self.all_label[idx])}

    def batch_proc(self, data_batch, device):
        # Train-time batch augmentation via the PointNet++ helper; `device`
        # is unused here (augmentation runs on CPU numpy arrays).
        if self.split == "train":
            point = np.array(data_batch['pc'])
            point = self._dataset._augment_batch_data(point)
            # converted to tensor to maintain compatibility with the other code
            data_batch['pc'] = torch.tensor(point)
        else:
            pass

        return data_batch


class ModelNet40Dgcnn(Dataset):
    """ModelNet40 loaded through the DGCNN dataset implementation.

    Yields {'pc': <point cloud>, 'label': <int>} dicts.
    """

    def __init__(self, split, train_data_path,
                 valid_data_path, test_data_path, num_points):
        self.split = split
        paths = {
            "train": train_data_path,
            "valid": valid_data_path,
            "test":  test_data_path
        }
        self.data_path = paths[self.split]

        # DGCNN only knows train/test partitions; our "valid" split reuses
        # the training partition.
        partition = 'train' if split in ['train', 'valid'] else 'test'
        self.dataset = dgcnn_ModelNet40(
            partition=partition,
            num_points=num_points,
            data_path=self.data_path,
        )

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        pc, label = self.dataset[idx]
        return {'pc': pc, 'label': label.item()}




class ModelNet40DgcnnWI(Dataset):
    """ModelNet40 via the DGCNN loader, "with index" variant.

    Same as ModelNet40Dgcnn but __getitem__ also returns the sample index,
    for code that needs to identify individual samples.
    """

    def __init__(self, split, train_data_path,
                 valid_data_path, test_data_path, num_points):
        self.split = split
        paths = {
            "train": train_data_path,
            "valid": valid_data_path,
            "test":  test_data_path
        }
        self.data_path = paths[self.split]

        # DGCNN only knows train/test partitions; "valid" reuses training.
        partition = 'train' if split in ['train', 'valid'] else 'test'
        self.dataset = dgcnn_ModelNet40(
            partition=partition,
            num_points=num_points,
            data_path=self.data_path,
        )

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        pc, label = self.dataset[idx]
        return {'pc': pc, 'label': label.item()}, idx





def load_data(data_path, corruption, severity):
    """Load one corrupted ModelNet40-C variant.

    Reads 'data_<corruption>_<severity>.npy' and the shared 'label.npy'
    from *data_path* and returns (points, labels) as numpy arrays.
    """
    data_file = os.path.join(
        data_path, 'data_' + corruption + '_' + str(severity) + '.npy')
    label_file = os.path.join(data_path, 'label.npy')
    return np.load(data_file), np.load(label_file)


import h5py
import warnings
from torch.utils.data import Dataset

warnings.filterwarnings('ignore')


class ScanObjectNNDataLoader(Dataset):
    """ScanObjectNN classification dataset read from the official h5 files.

    Yields {'pc': (1024, 3) float32 array, 'label': int64} dicts.

    Args:
        split: 'train'/'valid' both load the 'training' h5 file; anything
            else loads the 'test' file.
        bg: if True, use the variant that keeps background points
            ('main_split'); otherwise 'main_split_nobg'.
    """

    def __init__(self, split, bg=False):
        self.splits = split
        self.folder = 'training' if self.splits in ['train', 'valid'] else 'test'
        # NOTE(review): dataset location is hard-coded; consider making it
        # configurable via a parameter or config entry.
        self.root = '/mnt/mfs/zhangjinlai/data/h5_files'

        if bg:
            print('Use data with background points')
            dir_name = 'main_split'
        else:
            print('Use data without background points')
            dir_name = 'main_split_nobg'
        file_name = '_objectdataset.h5'
        h5_name = '{}/{}/{}'.format(self.root, dir_name, self.folder + file_name)
        # Load everything into memory up front; the file is closed afterwards.
        with h5py.File(h5_name, mode="r") as f:
            self.data = f['data'][:].astype('float32')
            self.label = f['label'][:].astype('int64')
        print('The size of %s data is %d' % (split, self.data.shape[0]))

    def __len__(self):
        return self.data.shape[0]

    def __getitem__(self, index):
        # Keep only the first 1024 points and the xyz coordinates.
        return {'pc': self.data[index][:1024, :3], 'label': self.label[index]}



class ScanObjectNNDataLoaderWI(Dataset):
    """ScanObjectNN dataset, "with index" variant.

    Identical to ScanObjectNNDataLoader except __getitem__ also returns the
    sample index, for code that needs to identify individual samples.

    Args:
        split: 'train'/'valid' both load the 'training' h5 file; anything
            else loads the 'test' file.
        bg: if True, use the variant that keeps background points
            ('main_split'); otherwise 'main_split_nobg'.
    """

    def __init__(self, split, bg=False):
        self.splits = split
        self.folder = 'training' if self.splits in ['train', 'valid'] else 'test'
        # NOTE(review): dataset location is hard-coded; consider making it
        # configurable via a parameter or config entry.
        self.root = '/mnt/mfs/zhangjinlai/data/h5_files'

        if bg:
            print('Use data with background points')
            dir_name = 'main_split'
        else:
            print('Use data without background points')
            dir_name = 'main_split_nobg'
        file_name = '_objectdataset.h5'
        h5_name = '{}/{}/{}'.format(self.root, dir_name, self.folder + file_name)
        # Load everything into memory up front; the file is closed afterwards.
        with h5py.File(h5_name, mode="r") as f:
            self.data = f['data'][:].astype('float32')
            self.label = f['label'][:].astype('int64')
        print('The size of %s data is %d' % (split, self.data.shape[0]))

    def __len__(self):
        return self.data.shape[0]

    def __getitem__(self, index):
        # Keep only the first 1024 points and the xyz coordinates.
        return {'pc': self.data[index][:1024, :3],
                'label': self.label[index]}, index


def read_off(file):
    """Parse an OFF mesh from an open text file object.

    Returns (verts, faces): verts is a list of [x, y, z] float lists; faces
    is a list of vertex-index lists (the per-face leading vertex count is
    stripped).

    Raises:
        ValueError: if the first line is not the 'OFF' header.
    """
    if 'OFF' != file.readline().strip():
        # Bug fix: the original did `raise('Not a valid OFF header')`, which
        # raises TypeError (a str is not an exception), not a useful error.
        raise ValueError('Not a valid OFF header')
    n_verts, n_faces, _ = tuple(int(s) for s in file.readline().strip().split(' '))
    verts = [[float(s) for s in file.readline().strip().split(' ')]
             for _ in range(n_verts)]
    faces = [[int(s) for s in file.readline().strip().split(' ')][1:]
             for _ in range(n_faces)]
    return verts, faces


class PointSampler(object):
    """Sample a fixed number of points from a triangle mesh, choosing faces
    with probability proportional to their area."""

    def __init__(self, output_size):
        assert isinstance(output_size, int)
        self.output_size = output_size

    def triangle_area(self, pt1, pt2, pt3):
        # Heron's formula; the max(..., 0) guards against tiny negative
        # values caused by floating-point round-off on degenerate triangles.
        side_a = np.linalg.norm(pt1 - pt2)
        side_b = np.linalg.norm(pt2 - pt3)
        side_c = np.linalg.norm(pt3 - pt1)
        s = 0.5 * (side_a + side_b + side_c)
        return max(s * (s - side_a) * (s - side_b) * (s - side_c), 0) ** 0.5

    def sample_point(self, pt1, pt2, pt3):
        # Uniform barycentric sampling on a triangle:
        # https://mathworld.wolfram.com/BarycentricCoordinates.html
        s, t = sorted([random.random(), random.random()])
        coord = lambda i: s * pt1[i] + (t - s) * pt2[i] + (1 - t) * pt3[i]
        return (coord(0), coord(1), coord(2))

    def __call__(self, mesh):
        verts, faces = mesh
        verts = np.array(verts)

        # Area of every face, used as the sampling weight.
        areas = np.zeros((len(faces)))
        for face_idx in range(len(areas)):
            tri = faces[face_idx]
            areas[face_idx] = self.triangle_area(verts[tri[0]],
                                                 verts[tri[1]],
                                                 verts[tri[2]])

        chosen = random.choices(faces,
                                weights=areas,
                                cum_weights=None,
                                k=self.output_size)

        sampled = np.zeros((self.output_size, 3))
        for out_idx in range(len(chosen)):
            tri = chosen[out_idx]
            sampled[out_idx] = self.sample_point(verts[tri[0]],
                                                 verts[tri[1]],
                                                 verts[tri[2]])

        return sampled


class Normalize(object):
    """Center a point cloud at the origin and scale it to fit in the unit
    sphere (divide by the largest point norm after centering)."""

    def __call__(self, pointcloud):
        assert len(pointcloud.shape) == 2

        centered = pointcloud - np.mean(pointcloud, axis=0)
        centered /= np.max(np.linalg.norm(centered, axis=1))
        return centered


class RandRotation_z(object):
    """Randomly rotate a point cloud about the z axis.

    Expects an (N, 3) array; returns the rotated (N, 3) array.
    """

    def __call__(self, pointcloud):
        assert len(pointcloud.shape) == 2

        # Bug fix: the original used `math.cos`/`math.sin`/`math.pi`, but
        # `math` is never imported in this module, so calling this raised
        # NameError.  numpy (already imported) provides the same scalar
        # functions with identical results.
        theta = random.random() * 2. * np.pi
        rot_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                               [np.sin(theta), np.cos(theta), 0],
                               [0, 0, 1]])

        rot_pointcloud = rot_matrix.dot(pointcloud.T).T
        return rot_pointcloud


class RandomNoise(object):
    """Jitter every coordinate with i.i.d. Gaussian noise (sigma = 0.02)."""

    def __call__(self, pointcloud):
        assert len(pointcloud.shape) == 2

        jitter = np.random.normal(0, 0.02, (pointcloud.shape))
        return pointcloud + jitter

class ToTensor(object):
    """Convert a 2-D numpy point cloud to a torch tensor.

    `torch.from_numpy` shares memory with the input array.
    """

    def __call__(self, pointcloud):
        assert len(pointcloud.shape) == 2
        return torch.from_numpy(pointcloud)


def default_transforms():
    """Default mesh preprocessing: sample 1024 points, then normalize.

    The ToTensor step is deliberately left out of this pipeline (it appears
    commented out in the original composition).
    """
    return transforms.Compose([PointSampler(1024), Normalize()])

# class ModelNet10(Dataset):
#     def __init__(self, split, root_dir, valid=False, transform=default_transforms()):
#         self.root_dir = path #root_dir
#         self.split = split
#         self.folder = 'train' if self.split in ['train', 'valid'] else 'test'
#         folders = [directory for directory in sorted(os.listdir(self.root_dir)) if os.path.isdir(self.root_dir/directory)]
#         self.classes = {folder: i for i, folder in enumerate(folders)}
#         self.transforms = transform if not valid else default_transforms()
#         self.valid = valid
#         self.files = []
#         for category in self.classes.keys():
#             new_dir = root_dir/Path(category)/self.folder
#             for file in os.listdir(new_dir):
#                 if file.endswith('.off'):
#                     sample = {}
#                     sample['pcd_path'] = new_dir/file
#                     sample['category'] = category
#                     self.files.append(sample)

#     def __len__(self):
#         return len(self.files)

#     def __preproc__(self, file):
#         verts, faces = read_off(file)
#         if self.transforms:
#             pointcloud = self.transforms((verts, faces))
#         return pointcloud

#     def __getitem__(self, idx):
#         pcd_path = self.files[idx]['pcd_path']
#         category = self.files[idx]['category']
#         with open(pcd_path, 'r') as f:
#             pointcloud = self.__preproc__(f)
#         # print(pointcloud[:,:3].shape)
#         pc = pointcloud[:,:3]
#         return {'pc': pc,
#                 'label': self.classes[category]}

    # def batch_proc(self, data_batch, device):
    #     if self.split == "train":
    #         point = np.array(data_batch['pc'], dtype="float32")
    #         point = self._dataset._augment_batch_data(point)
    #         # converted to tensor to maintain compatibility with the other code
    #         data_batch['pc'] = torch.tensor(point)
    #     else:
    #         pass

    #     return data_batch



class ModelNet10(Dataset):
    """ModelNet10 loaded from pre-processed hdf5 shards.

    Yields {'pc': (1024, 3) float32 array, 'label': int64 scalar} dicts.

    Args:
        split: 'train'/'valid' load the train shards, 'test' the test
            shards, and 'all' loads both.
        root_dir: unused; kept for interface compatibility.
            NOTE(review): the data location is hard-coded below -- consider
            honoring this parameter instead.
        bg: unused; kept for interface compatibility.
    """

    def __init__(self, split, root_dir, bg=False):
        self.split = split
        # NOTE(review): hard-coded dataset location; `root_dir` is ignored.
        self.root = '/mnt/mfs/zhangjinlai/data/modelnet10_hdf5_2048'
        self.path_h5py_all = []
        if self.split in ['train', 'valid', 'all']:
            self.get_path('train')
        if self.split in ['test', 'all']:
            self.get_path('test')

        # Sort for a deterministic shard order across runs/filesystems.
        self.path_h5py_all.sort()
        data, label = self.load_h5py(self.path_h5py_all)
        self.data = np.concatenate(data, axis=0)
        self.label = np.concatenate(label, axis=0)
        print('The size of %s data is %d' % (split, len(self.data)))

    def get_path(self, type):
        """Collect every h5 shard whose filename contains *type*."""
        path_h5py = os.path.join(self.root, '*%s*.h5' % type)
        self.path_h5py_all += glob(path_h5py)

    def load_h5py(self, path):
        """Read the 'data' and 'label' arrays from each h5 file in *path*.

        Returns two lists of numpy arrays (one entry per shard).
        """
        all_data = []
        all_label = []
        for h5_name in path:
            # Fix: open read-only with a context manager; the original used
            # mode 'r+' (read-write), which needlessly required write
            # permission on the dataset files and leaked the handle on error.
            with h5py.File(h5_name, 'r') as f:
                all_data.append(f['data'][:].astype('float32'))
                all_label.append(f['label'][:].astype('int64'))
        return all_data, all_label

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # First 1024 points, xyz only; squeeze the (1,) label to a scalar.
        return {'pc': self.data[index][:1024, :3],
                'label': self.label[index].squeeze(0)}



class ModelNet10WI(Dataset):
    """ModelNet10 from hdf5 shards, "with index" variant.

    Identical to ModelNet10 except __getitem__ also returns the sample
    index, for code that needs to identify individual samples.

    Args:
        split: 'train'/'valid' load the train shards, 'test' the test
            shards, and 'all' loads both.
        root_dir: unused; kept for interface compatibility.
            NOTE(review): the data location is hard-coded below -- consider
            honoring this parameter instead.
        bg: unused; kept for interface compatibility.
    """

    def __init__(self, split, root_dir, bg=False):
        self.split = split
        # NOTE(review): hard-coded dataset location; `root_dir` is ignored.
        self.root = '/mnt/mfs/zhangjinlai/data/modelnet10_hdf5_2048'
        self.path_h5py_all = []
        if self.split in ['train', 'valid', 'all']:
            self.get_path('train')
        if self.split in ['test', 'all']:
            self.get_path('test')

        # Sort for a deterministic shard order across runs/filesystems.
        self.path_h5py_all.sort()
        data, label = self.load_h5py(self.path_h5py_all)
        self.data = np.concatenate(data, axis=0)
        self.label = np.concatenate(label, axis=0)
        print('The size of %s data is %d' % (split, len(self.data)))

    def get_path(self, type):
        """Collect every h5 shard whose filename contains *type*."""
        path_h5py = os.path.join(self.root, '*%s*.h5' % type)
        self.path_h5py_all += glob(path_h5py)

    def load_h5py(self, path):
        """Read the 'data' and 'label' arrays from each h5 file in *path*.

        Returns two lists of numpy arrays (one entry per shard).
        """
        all_data = []
        all_label = []
        for h5_name in path:
            # Fix: open read-only with a context manager; the original used
            # mode 'r+' (read-write), which needlessly required write
            # permission on the dataset files and leaked the handle on error.
            with h5py.File(h5_name, 'r') as f:
                all_data.append(f['data'][:].astype('float32'))
                all_label.append(f['label'][:].astype('int64'))
        return all_data, all_label

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # First 1024 points, xyz only; squeeze the (1,) label to a scalar.
        return {'pc': self.data[index][:1024, :3],
                'label': self.label[index].squeeze(0)}, index



class ModelNet40C(Dataset):
    """Corrupted ModelNet40 (ModelNet40-C) evaluation set.

    Yields {'pc': <point cloud>, 'label': <int>} dicts. Only the 'test'
    split exists for this dataset.
    """

    def __init__(self, split, test_data_path, corruption, severity):
        assert split == 'test'
        self.split = split
        self.data_path = {"test": test_data_path}[self.split]
        self.corruption = corruption
        self.severity = severity

        self.data, self.label = load_data(self.data_path, self.corruption,
                                          self.severity)
        self.partition = 'test'

    def __getitem__(self, item):
        pointcloud = self.data[item]
        label = self.label[item]
        return {'pc': pointcloud, 'label': label.item()}

    def __len__(self):
        return self.data.shape[0]


def create_dataloader(split, cfg):
    """Build a DataLoader for the dataset named in cfg.EXP.DATASET.

    The training split gets shuffling and drop_last; pin_memory is enabled
    only when CUDA is available and no worker processes are used. Datasets
    without a `batch_proc` hook get `batch_proc = None` so callers can test
    for it uniformly. Raises AssertionError for an unknown dataset name.
    """
    num_workers = cfg.DATALOADER.num_workers
    batch_size = cfg.DATALOADER.batch_size

    # Registry: dataset name -> (dataset class, cfg.DATALOADER attribute
    # holding its constructor kwargs). For the RSCNN dataset, augmentation
    # is done directly in its own code to stay close to the vanilla version.
    registry = {
        "modelnet40_rscnn": (ModelNet40Rscnn, "MODELNET40_RSCNN"),
        "modelnet40_pn2": (ModelNet40PN2, "MODELNET40_PN2"),
        "modelnet40_dgcnn": (ModelNet40Dgcnn, "MODELNET40_DGCNN"),
        "modelnet40_dgcnnwi": (ModelNet40DgcnnWI, "MODELNET40_DGCNN"),
        "modelnet10": (ModelNet10, "MODELNET10"),
        "modelnet10wi": (ModelNet10WI, "MODELNET10"),
        "scanobjectnn": (ScanObjectNNDataLoader, "SCANOBJECTNN"),
        "scanobjectnnwi": (ScanObjectNNDataLoaderWI, "SCANOBJECTNN"),
        "modelnet40_c": (ModelNet40C, "MODELNET40_C"),
        "modelnet_c": (ModelNetC, "MODELNET_C"),
    }
    assert cfg.EXP.DATASET in registry
    dataset_cls, cfg_attr = registry[cfg.EXP.DATASET]

    dataset_args = {"split": split}
    dataset_args.update(dict(**getattr(cfg.DATALOADER, cfg_attr)))
    dataset = dataset_cls(**dataset_args)

    if "batch_proc" not in dir(dataset):
        dataset.batch_proc = None

    is_train = (split == "train")
    return DataLoader(
        dataset,
        batch_size,
        num_workers=num_workers,
        shuffle=is_train,
        drop_last=is_train,
        pin_memory=(torch.cuda.is_available()) and (not num_workers)
    )

