import os.path as osp
import json
import numpy as np
from torch.utils.data import Dataset
from pathlib import Path
from collections import OrderedDict

from .builder import DATASETS
from .pipelines import Compose
from ..utils import get_root_logger
from itertools import chain
import pickle

from sklearn.metrics import accuracy_score, recall_score

@DATASETS.register_module()
class ShapeNetPartSeg(Dataset):
    """ShapeNet part-segmentation dataset.

    Expects the standard ShapeNet-Part layout under ``root``:
    per-synset folders of whitespace-separated ``.txt`` files (xyz in the
    first three columns, the global part label in the last column), a
    ``synsetoffset2category.txt`` mapping file and the official
    ``train_test_split`` JSON file lists.

    Args:
        root (str): Dataset root directory.
        split (str): One of 'train', 'val', 'test' or 'trainval'.
        pipeline (list[dict]): Processing pipeline config passed to Compose.
        test_mode (bool): Accepted for API compatibility; not used here.
        select_classes (list[str] | None): If given, keep only samples whose
            category name is in this list (e.g. ['Airplane', 'Chair']).
    """

    def __init__(self, root, split, pipeline, test_mode=False, select_classes=None):
        self.root = root
        self.split = split
        self.select_classes = select_classes
        self.logger = get_root_logger(name='ShapeNetPartSeg')

        # Synset offset -> human-readable category, e.g. 02691156 -> Airplane.
        self.cat = OrderedDict()
        for line in open(osp.join(self.root, 'synsetoffset2category.txt')):
            x = line.strip().split()
            self.cat[x[1]] = x[0]

        if split == 'trainval':
            # BUGFIX: the original formatted the filename with `split`
            # ('trainval'), opening a non-existent list twice. Concatenate
            # the official 'train' and 'val' lists instead.
            file_ids = []
            for s in ['train', 'val']:
                with open(osp.join(self.root, 'train_test_split', f'shuffled_{s}_file_list.json'), 'r') as f:
                    file_ids.extend([d.rstrip() for d in json.load(f)])
        else:
            with open(osp.join(self.root, 'train_test_split', f'shuffled_{split}_file_list.json'), 'r') as f:
                file_ids = [d.rstrip() for d in json.load(f)]

        # List of absolute sample paths, optionally filtered by category.
        self.datapath = []
        for file_id in file_ids:
            synset_name = Path(file_id).parent.name
            clsname = self.cat[synset_name]
            if select_classes is not None and clsname not in select_classes:
                continue
            file_name = Path(file_id).name
            file_path = Path(self.root) / synset_name / (file_name + '.txt')
            self.datapath.append(file_path)

        self.pipeline = Compose(pipeline)

        self.metricsCalculator = ShapenetPartSegMetrics(select_classes=select_classes)
        self.CLASSES = self.metricsCalculator.CLASSES
        self.part2cls = self.metricsCalculator.part2cls

        self.load_data()
        self.logger.info('The size of %s data is %d' % (split, len(self.points)))

    def load_data(self):
        """Load all point clouds and labels into memory, using a pickle cache.

        Note: the per-sample point count is inconsistent; it is unified
        later by the pipeline.
        """
        cache_path = osp.join(self.root, f'shapenet_partseg_{self.split}_cache.pkl')
        # A class-filtered subset must never be read from (or written to) the
        # full-dataset cache, so bypass the cache whenever select_classes is set.
        if self.select_classes is not None or not osp.exists(cache_path):
            self.logger.info('loading data into memory...')
            all_points = []
            all_part_labels = []
            for fn in self.datapath:
                file_data = np.loadtxt(fn)
                points = file_data[:, :3].astype(np.float32)
                part_labels = file_data[:, -1].astype(np.int64)

                all_points.append(points)
                all_part_labels.append(part_labels)
            if self.select_classes is None:
                # Only log + save when we actually cache (the original logged
                # "saving cache" even for filtered subsets that are not saved).
                self.logger.info(f'saving cache into {cache_path} ...')
                with open(cache_path, 'wb') as f:
                    pickle.dump({
                        'points': all_points,
                        'part_labels': all_part_labels
                    }, f)
            self.logger.info('finished loading data')
        else:
            self.logger.info(f'loading cache from {cache_path} ...')
            with open(cache_path, 'rb') as f:
                data = pickle.load(f)
            all_points = data['points']
            all_part_labels = data['part_labels']
        self.points = all_points
        self.part_labels = all_part_labels
        # A shape's category is recoverable from any of its part labels;
        # use the first point's part label.
        self.cls_labels = [self.part2cls[p[0]] for p in self.part_labels]
        assert len(self.points) == len(self.cls_labels) and len(self.points) == len(self.part_labels)

    def __len__(self):
        return len(self.cls_labels)

    def __getitem__(self, index):
        results = {
            'points': self.points[index],
            'cls_label': self.cls_labels[index],
            'part_labels': self.part_labels[index].astype(np.int64)
        }
        return self.pipeline(results)

    def evaluate(self,
                 results,
                 metric='accuracy',
                 logger=None):
        """
        Called by evaluate hook during training
        Evaluate the dataset.
        Args:
            results (list): Testing results of the dataset. e.g. generated from single_gpu_test or multi_gpu_test
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.
        Returns:
            dict: All metrics produced by ShapenetPartSegMetrics.
        """

        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        # 'inctance_avg_iou' is kept for backward compatibility with existing
        # configs; 'instance_avg_iou' is the corrected spelling.
        allowed_metrics = ['accuracy', 'class_avg_accuracy', 'class_avg_iou',
                           'inctance_avg_iou', 'instance_avg_iou']
        if metric not in allowed_metrics:
            raise KeyError(f'metric {metric} is not supported')

        # Collect per-batch outputs into flat arrays.
        pred_logits = np.concatenate([ret['pred_logits'].detach().cpu().numpy() for ret in results])  # [N, num_parts, npoints]
        gt_cls_labels = np.concatenate([ret['cls_label'].detach().cpu().numpy() for ret in results])  # [N]
        gt_part_labels = np.concatenate([ret['part_labels'].detach().cpu().numpy() for ret in results])  # [N, npoints]

        metrics = self.metricsCalculator(pred_logits, gt_part_labels, gt_cls_labels)
        return metrics

class ShapenetPartSegMetrics:
    """Computes ShapeNet part-segmentation metrics.

    Produces per-point accuracy, macro recall, instance-averaged IoU and
    class(category)-averaged IoU from raw per-part logits.

    Args:
        select_classes (list[str] | None): If given, restrict the label
            tables to these category names.
    """

    def __init__(self, select_classes=None):
        # Global part-label ids for each of the 16 categories (50 parts total).
        label_info = {'Earphone': [16, 17, 18], 'Motorbike': [30, 31, 32, 33, 34, 35], 'Rocket': [41, 42, 43],
                            'Car': [8, 9, 10, 11], 'Laptop': [28, 29], 'Cap': [6, 7], 'Skateboard': [44, 45, 46],
                            'Mug': [36, 37], 'Guitar': [19, 20, 21], 'Bag': [4, 5], 'Lamp': [24, 25, 26, 27],
                            'Table': [47, 48, 49], 'Airplane': [0, 1, 2, 3], 'Pistol': [38, 39, 40],
                            'Chair': [12, 13, 14, 15], 'Knife': [22, 23]}
        if select_classes is not None:
            label_info = {cname: label_info[cname] for cname in select_classes}

        # Category names sorted alphabetically; their positions define the
        # category (cls) ids used everywhere below.
        self.classnames = sorted(label_info)
        # Sorted list of all valid global part ids.
        self.CLASSES = sorted(chain.from_iterable(label_info.values()))

        # part id -> category id, and category id -> list of its part ids.
        self.part2cls = {}
        for clsname, parts in label_info.items():
            cls_id = self.classnames.index(clsname)
            for p in parts:
                self.part2cls[p] = cls_id
        self.cls2parts = {self.classnames.index(k): v for k, v in label_info.items()}

        # Evaluation relies on each category's part ids being consecutive
        # (the argmax offset trick in __call__); verify that here.
        for parts in self.cls2parts.values():
            for i in range(len(parts)):
                assert parts[i] == parts[0] + i

    def __call__(self, pred_logits, gt_part_labels, gt_cls_labels):
        """
        Args:
            pred_logits: np.array [N, num_parts, npoints]
            gt_part_labels: [N, npoints] global part ids
            gt_cls_labels: [N] category ids
        return:
            output: dict with accuracy, recall, ins_avg_iou, cls_avg_iou
        """
        N = pred_logits.shape[0]
        npoints = pred_logits.shape[-1]
        cls_num = len(self.cls2parts)

        # Restrict each sample's prediction to the parts of its gt category.
        pred = np.zeros([N, npoints], dtype=np.int64)
        for i in range(N):
            valid_part_ids = self.cls2parts[gt_cls_labels[i]]
            # Adding the first id works because each category's part ids
            # are consecutive (asserted in __init__).
            pred[i, :] = np.argmax(pred_logits[i, valid_part_ids], 0) + valid_part_ids[0]

        instance_iou = []
        accuracy = []
        recall = []
        for i in range(N):   # sample_idx
            part_ious = []
            # BUGFIX: iterate the actual global part ids instead of
            # range(len(self.part2cls)). With select_classes set, part ids do
            # not start at 0, so the original never matched any label and
            # np.mean([]) produced NaN IoUs.
            for part in self.CLASSES:
                I = np.sum(np.logical_and(pred[i] == part, gt_part_labels[i] == part))
                U = np.sum(np.logical_or(pred[i] == part, gt_part_labels[i] == part))
                F = np.sum(gt_part_labels[i] == part)
                # Only count parts that actually appear in the ground truth.
                if F != 0:
                    iou = I / float(U)
                    part_ious.append(iou)
            instance_iou.append(np.mean(part_ious))
            accuracy.append(accuracy_score(gt_part_labels[i], pred[i]))
            # Macro recall restricted to the sample's own category parts;
            # zero_division=1 scores absent parts as perfect recall.
            cls_id = self.part2cls[gt_part_labels[i][0]]
            valid_labels = self.cls2parts[cls_id]
            recall.append(recall_score(gt_part_labels[i], pred[i], average='macro',
                                       labels=valid_labels, zero_division=1))

        # Average the instance IoUs within each category.
        per_cls_iou = np.zeros(cls_num).astype(np.float32)
        per_cls_count = np.zeros(cls_num).astype(np.int32)
        for i in range(N):
            cur_gt_label = gt_cls_labels[i]
            per_cls_iou[cur_gt_label] += instance_iou[i]
            per_cls_count[cur_gt_label] += 1

        for cat_idx in range(cls_num):
            if per_cls_count[cat_idx] > 0:
                per_cls_iou[cat_idx] = per_cls_iou[cat_idx] / per_cls_count[cat_idx]
        # NOTE(review): categories absent from `results` contribute 0 to
        # cls_avg_iou; kept as-is for compatibility with prior runs.

        metrics = {}
        metrics['accuracy'] = np.mean(accuracy)
        metrics['recall'] = np.mean(recall)
        metrics['ins_avg_iou'] = np.mean(instance_iou)
        metrics['cls_avg_iou'] = np.mean(list(per_cls_iou))
        return metrics