"""
use modelnet40_ply_hdf5_2048.zip
borrowed from CurveNet https://github.com/tiangexiang/CurveNet
"""
import os
import sys
import glob
import h5py
import numpy as np
import torch
from torch.utils.data import Dataset
from collections import OrderedDict
from pathlib import Path

from .builder import DATASETS
from .pipelines import Compose
from sklearn.metrics import accuracy_score, precision_score, recall_score

def load_data_cls(partition, data_root, shuffle=False):
    """Load every ModelNet40 HDF5 file belonging to one split.

    Source archive:
    https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip

    Args:
        partition (str): split name matched against file names ('train'/'test').
        data_root (str): directory containing the ``*<partition>*.h5`` files.
        shuffle (bool): if True, shuffle the point order once per file; the
            same permutation is applied to every cloud stored in that file.

    Returns:
        tuple: ``(data, label)`` — ``data`` is float32 of shape (N, P, 3)
        and ``label`` is int64 of shape (N,).

    Raises:
        FileNotFoundError: if no matching ``.h5`` file exists in ``data_root``.
    """
    all_data = []
    all_label = []
    # sorted() makes the sample order reproducible across runs/filesystems.
    h5_files = sorted(glob.glob(os.path.join(data_root, '*%s*.h5' % partition)))
    if not h5_files:
        raise FileNotFoundError(
            f'no *{partition}*.h5 files found in {data_root}')
    for h5_name in h5_files:
        # Open read-only: 'r+' (old behavior) needs write access and can
        # corrupt the dataset; the context manager closes on any exception.
        with h5py.File(h5_name, 'r') as f:
            data = f['data'][:].astype('float32')
            label = f['label'][:].astype('int64')
        if shuffle:
            idx = np.arange(data.shape[1])
            np.random.shuffle(idx)
            data = data[:, idx]
        all_data.append(data)
        all_label.append(label)
    all_data = np.concatenate(all_data, axis=0)
    all_label = np.concatenate(all_label, axis=0).reshape([-1])
    return all_data, all_label


def translate_pointcloud(pointcloud):
    """Randomly scale and shift a point cloud (train-time augmentation).

    Each axis gets an independent scale factor drawn from U[2/3, 3/2]
    followed by an offset drawn from U[-0.2, 0.2].

    Args:
        pointcloud (np.ndarray): points with a trailing dimension of 3.

    Returns:
        np.ndarray: augmented cloud, same shape, cast to float32.
    """
    # Draw order matters for RNG reproducibility: scale first, then shift.
    scale = np.random.uniform(low=2. / 3., high=3. / 2., size=[3])
    shift = np.random.uniform(low=-0.2, high=0.2, size=[3])
    return (pointcloud * scale + shift).astype('float32')

@DATASETS.register_module()
class ModelNet40_ply(Dataset):
    """ModelNet40 classification dataset (modelnet40_ply_hdf5_2048 archive).

    Args:
        root (str): directory holding the ``*<split>*.h5`` files and
            ``shape_names.txt``.
        split (str): partition name, e.g. 'train' or 'test'.
        pipeline (list): transform configs wrapped into a ``Compose``.
        test_mode (bool): accepted for config compatibility; not used here.
        num_points (int): number of points taken from the start of each cloud.
        select_classes (list[str] | None): optional subset of class names to
            keep; their labels are remapped to ``0..len(select_classes)-1``.
        point_shuffle (bool): shuffle point order at load time
            (see ``load_data_cls``).
    """

    def __init__(self, root, split, pipeline, test_mode=False, num_points=4096,
                 select_classes=None, point_shuffle=True):
        self.data, self.label = load_data_cls(split, root, shuffle=point_shuffle)
        self.num_points = num_points
        self.split = split
        self.pipeline = Compose(pipeline)

        shapenames_file = Path(root) / 'shape_names.txt'
        # read_text() closes the file deterministically (the old bare open()
        # relied on GC to release the handle).
        self.CLASSES = [x.rstrip() for x in shapenames_file.read_text().splitlines()
                        if x.rstrip()]
        assert len(self.CLASSES) == 40

        if select_classes is not None:
            # Relabel each selected class to its position in select_classes.
            # BUGFIX: the old code assigned through the *accumulated* mask
            # (self.label[mask] = i), which rewrote the labels of every
            # previously selected class on each iteration, collapsing them
            # all to the last index. Use a per-class mask and a scratch copy
            # so original labels are compared, new labels written separately.
            keep = np.zeros(self.label.shape, dtype=bool)
            new_label = self.label.copy()
            for i, c in enumerate(select_classes):
                assert c in self.CLASSES
                cls_mask = self.label == self.CLASSES.index(c)
                keep |= cls_mask
                new_label[cls_mask] = i
            self.data = self.data[keep]
            self.label = new_label[keep]
            self.CLASSES = select_classes

    def __getitem__(self, item):
        """Return a dict {'points', 'label'} run through the pipeline."""
        # Clouds are stored with 2048 points; take a fixed-length prefix.
        pointcloud = self.data[item][:self.num_points]
        label = self.label[item]
        result = {
            'points': pointcloud,
            'label': label
        }
        return self.pipeline(result)

    def __len__(self):
        """Number of point clouds in this split."""
        return self.data.shape[0]

    def evaluate(self,
                 results,
                 metric='accuracy',
                 logger=None):
        """
        Called by evaluate hook during training
        Evaluate the dataset.
        Args:
            results (list): Testing results of the dataset. e.g. generated
                from single_gpu_test or multi_gpu_test; each entry carries
                torch tensors 'pred_logits' (N, C) and 'label' (N,).
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.
        Returns:
            OrderedDict: accuracy, macro precision and macro recall.
        """
        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ['accuracy', 'precision', 'recall']
        if metric not in allowed_metrics:
            raise KeyError(f'metric {metric} is not supported')
        eval_results = OrderedDict()
        pred_labels = np.concatenate(
            [ret['pred_logits'].argmax(1).detach().cpu().numpy() for ret in results])
        labels = np.concatenate(
            [ret['label'].detach().cpu().numpy() for ret in results])
        eval_results['accuracy'] = accuracy_score(labels, pred_labels)
        # BUGFIX: 'precision' was an allowed metric but was never computed
        # (and the precision_score import went unused).
        eval_results['precision_mean'] = precision_score(
            labels, pred_labels, average='macro')
        eval_results['recall_mean'] = recall_score(
            labels, pred_labels, average='macro')
        return eval_results