import numpy as np
from os import path as osp
from collections import OrderedDict
from numpy.lib.function_base import average
from sklearn.utils import shuffle
from torch.utils.data import Dataset

from .builder import DATASETS
from .pipelines import Compose
from ..utils import get_root_logger

from sklearn.metrics import accuracy_score, precision_score, recall_score

@DATASETS.register_module()
class ModelNet40(Dataset):
    """ModelNet40 point-cloud classification dataset.

    Reads the txt-based ModelNet40 release (one ``<class>_<idx>.txt`` file
    per shape, comma-separated values, first three columns are xyz) and
    caches the stacked points/labels as ``.npy`` files under ``root`` so
    later runs load instantly.

    Args:
        root (str): Dataset root containing ``modelnet40_shape_names.txt``
            and ``modelnet40_{split}.txt`` index files.
        split (str): ``'train'`` or ``'test'``.
        pipeline (list[dict]): Transform configs composed via ``Compose``.
        presample (int | None): If set, randomly keep this many points per
            shape once at load time. Default: None.
        repeat (int): Virtual repetition factor applied in ``__len__``.
            Default: 1.
        test_mode (bool): Unused; kept for registry interface compatibility.
    """

    def __init__(self, root, split, pipeline, presample=None, repeat=1, test_mode=False):
        self.root = root
        self.split = split
        self.logger = get_root_logger(name='ModelNet40')
        assert (split == 'train' or split == 'test')

        self.catfile = osp.join(self.root, 'modelnet40_shape_names.txt')

        # Use context managers so the index files are closed instead of
        # leaking their handles.
        with open(self.catfile) as f:
            self.CLASSES = [line.rstrip() for line in f]  # for compatible
        # class name -> integer label
        self.cat_dict = dict(zip(self.CLASSES, range(len(self.CLASSES))))

        with open(osp.join(self.root, f'modelnet40_{split}.txt')) as f:
            shape_ids = [line.rstrip() for line in f]

        # 'airplane_0001' -> 'airplane' (class names themselves may contain '_')
        shape_names = ['_'.join(x.split('_')[0:-1]) for x in shape_ids]
        self.datapath = [(shape_names[i], osp.join(self.root, shape_names[i], shape_ids[i]) + '.txt')
                         for i in range(len(shape_ids))]
        self.pipeline = Compose(pipeline)
        self.presample = presample
        self.repeat = repeat

        self.load_data()
        self.logger.info('The size of %s data is %d' % (split, len(self.points)))

    def load_data(self):
        """Load (or build and cache) the full point/label arrays in memory.

        Sets ``self.points`` (N, P, 3) and ``self.labels`` (N,).
        """
        points_save_path = osp.join(self.root, f'modelnet40_{self.split}_points.npy')
        labels_save_path = osp.join(self.root, f'modelnet40_{self.split}_labels.npy')
        if not osp.exists(points_save_path):
            self.logger.info('loading data into memory...')
            all_points = []
            all_labels = []
            # BUGFIX: iterate over self.datapath, not range(len(self)) --
            # __len__ reads self.labels, which does not exist yet at this
            # point, so the original crashed on the first (uncached) run.
            for shape_name, file_path in self.datapath:
                label = int(self.cat_dict[shape_name])
                points = np.loadtxt(file_path, delimiter=',').astype(np.float32)
                points = points[:, :3]  # keep xyz, drop normals if present
                all_points.append(points)
                all_labels.append(label)
            all_points = np.stack(all_points)
            all_labels = np.array(all_labels)
            self.logger.info(f'saving into {points_save_path} ...')
            self.logger.info(f'saving into {labels_save_path} ...')
            np.save(points_save_path, all_points)
            np.save(labels_save_path, all_labels)
            self.logger.info('finished loading data')
        else:
            self.logger.info(f'loading cache from {points_save_path} ...')
            self.logger.info(f'loading cache from {labels_save_path} ...')
            all_points = np.load(points_save_path)
            all_labels = np.load(labels_save_path)

        if self.presample is not None:
            # BUGFIX: the subsampled points were computed but then discarded
            # (self.points was unconditionally set to all_points, so
            # `presample` silently had no effect). Sample via a random
            # permutation of indices so the cached array is not shuffled
            # in place.
            new_points = []
            for i in range(all_points.shape[0]):
                choice = np.random.permutation(all_points.shape[1])[:self.presample]
                new_points.append(all_points[i][choice])
            all_points = np.stack(new_points)
        self.points = all_points
        self.labels = all_labels
        assert len(self.points) == len(self.labels)

    def __len__(self):
        # Virtual length: the dataset is traversed `repeat` times per epoch.
        return len(self.labels) * self.repeat

    def __getitem__(self, index):
        # Fold repeated indices back onto the real data range.
        real_index = index % len(self.labels)
        results = {
            'points': self.points[real_index],
            'label': self.labels[real_index]
        }
        return self.pipeline(results)

    def evaluate(self,
                 results,
                 metric='accuracy',
                 logger=None):
        """
        Called by evaluate hook during training
        Evaluate the dataset.
        Args:
            results (list): Testing results of the dataset; each item holds
                a 'pred_logits' tensor (B, num_classes) and a 'label' tensor.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.
        Returns:
            OrderedDict: always contains 'accuracy' and 'recall_mean';
                additionally 'precision_mean' when metric='precision'.
        """

        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ['accuracy', 'precision', 'recall']
        if metric not in allowed_metrics:
            raise KeyError(f'metric {metric} is not supported')
        eval_results = OrderedDict()
        pred_labels = np.concatenate([ret['pred_logits'].argmax(1).detach().cpu().numpy() for ret in results])
        labels = np.concatenate([ret['label'].detach().cpu().numpy() for ret in results])
        eval_results['accuracy'] = accuracy_score(labels, pred_labels)
        recall = recall_score(labels, pred_labels, average=None)
        eval_results['recall_mean'] = np.mean(recall)
        # BUGFIX: 'precision' was an allowed metric but was never computed
        # (precision_score was imported and unused). Reported only on
        # request to keep the original result dict backward-compatible.
        if metric == 'precision':
            precision = precision_score(labels, pred_labels, average=None)
            eval_results['precision_mean'] = np.mean(precision)
        return eval_results
