from os.path import join

import numpy

from helper_ply import read_ply
from tool import ConfigSensatUrban as cfg
from tool import DataProcessing as DP
from tool import Plot
from torch.utils.data import Dataset
import numpy as np
import time, pickle
import torch

import my_tool

# Path separator used to split dataset file paths into cloud names
# (presumably OS-dependent — provided by the project-local my_tool helper).
path_separator = my_tool.path_div_separator()


class SensatUrban(Dataset):
    """Urban-scale point-cloud dataset (SensatUrban) for semantic segmentation.

    Loads grid-subsampled clouds produced by a preprocessing step (one .ply
    with colors/labels plus a pickled KD-tree per cloud) and serves fixed-size
    point neighbourhoods using the RandLA-Net "possibility" sampling scheme:
    each query is centred on the least-recently-sampled point so every cloud
    is covered evenly over time.

    Args:
        mode: one of 'training', 'validation' or 'test'.
    """

    def __init__(self, mode):
        self.mode = mode
        self.name = 'SensatUrban'
        root_path = r'/home/data3/lzp'  # path to the dataset root
        self.path = join(root_path, self.name)
        self.label_to_names = {0: 'Ground', 1: 'High Vegetation', 2: 'Buildings', 3: 'Walls',
                               4: 'Bridge', 5: 'Parking', 6: 'Rail', 7: 'traffic Roads', 8: 'Street Furniture',
                               9: 'Cars', 10: 'Footpath', 11: 'Bikes', 12: 'Water'}
        self.num_classes = len(self.label_to_names)
        self.label_values = np.sort([k for k, v in self.label_to_names.items()])
        self.label_to_idx = {l: i for i, l in enumerate(self.label_values)}
        self.ignored_labels = np.array([])

        self.use_val = True  # whether a validation split is held out
        self.train_list, self.val_list, self.test_list = my_tool.get_file_list(self.path, self.use_val)

        print('------------ CREATE DATASET '+self.mode+' ---------------')

        if self.mode == 'training':
            self.data_list = self.train_list
            self.num_per_epoch = cfg.train_steps * cfg.batch_size
        elif self.mode == 'validation':
            self.data_list = self.val_list
            self.num_per_epoch = cfg.val_steps * cfg.val_batch_size
        elif self.mode == 'test':
            self.data_list = self.test_list
            self.num_per_epoch = cfg.val_steps * cfg.val_batch_size
        else:
            # Fail fast: previously an invalid mode left data_list undefined
            # and surfaced later as an obscure AttributeError.
            raise ValueError("mode must be 'training', 'validation' or 'test', "
                             "got {!r}".format(mode))

        # Per-cloud state filled in by load_sub_sampled_clouds().
        self.num_per_class = np.zeros(self.num_classes)
        self.val_proj = []
        self.val_labels = []
        self.test_proj = []
        self.test_labels = []
        self.possibility = []
        self.min_possibility = []
        self.input_trees = []
        self.input_colors = []
        self.input_labels = []
        self.input_names = []

        self.load_sub_sampled_clouds(cfg.sub_grid_size)
        if mode == 'training':
            # Class weights are derived from the training label histogram only.
            cfg.class_weights = DP.get_class_weights(self.num_per_class)

        # Initialise each point's sampling "possibility" with small random
        # noise so the first queries are spread across each cloud.
        self.possibility = []
        self.min_possibility = []
        for colors in self.input_colors:
            self.possibility += [np.random.rand(colors.shape[0]) * 1e-3]
            self.min_possibility += [float(np.min(self.possibility[-1]))]

        # Drop ignored classes from the histogram in one call: deleting one
        # index per loop iteration shifts the remaining indices and removes
        # the wrong classes once more than one label is ignored.
        if len(self.ignored_labels) > 0:
            ignored_inds = [self.label_to_idx[l] for l in self.ignored_labels]
            self.num_per_class = np.delete(self.num_per_class, ignored_inds)

        cfg.ignored_label_inds = [self.label_to_idx[ign_label] for ign_label in self.ignored_labels]

        print('------------ CREATE DATASET '+self.mode+' FINISHED---------------')

    def __len__(self):
        # Epoch length is steps * batch size, not the number of files:
        # items are sampled neighbourhoods, not whole clouds.
        return self.num_per_epoch

    def __getitem__(self, item):
        """Return one sample: (xyz, colors, labels, point indices, cloud index)."""
        return self.spatially_regular_gen(item)

    def load_sub_sampled_clouds(self, sub_grid_size):
        """Load every sub-sampled cloud in self.data_list into memory.

        Reads the grid_{size} folder: a .ply (colors + labels) and a pickled
        KD-tree per cloud, plus the reprojection indices that map sub-sampled
        predictions back to full resolution (validation/test modes only).
        """
        tree_path = join(self.path, 'grid_{:.3f}'.format(sub_grid_size))

        for file_path in self.data_list:
            t0 = time.time()
            cloud_name = file_path.split(path_separator)[-1][:-4]

            # Names of the input files for this cloud.
            kd_tree_file = join(tree_path, '{:s}_KDTree.pkl'.format(cloud_name))
            sub_ply_file = join(tree_path, '{:s}.ply'.format(cloud_name))

            data = read_ply(sub_ply_file)
            sub_colors = np.vstack((data['red'], data['green'], data['blue'])).T
            sub_labels = data['class']

            # Accumulate the per-class histogram on the training split only.
            if self.mode == 'training':
                self.num_per_class += DP.get_num_class_from_label(sub_labels, self.num_classes)

            # NOTE(review): unpickling assumes the preprocessing output is
            # trusted — pickle must never be fed untrusted data.
            with open(kd_tree_file, 'rb') as f:
                search_tree = pickle.load(f)

            self.input_trees.append(search_tree)
            self.input_colors.append(sub_colors)
            self.input_labels.append(sub_labels)
            self.input_names.append(cloud_name)

            # Rough memory estimate: N points * 4 bytes * 7 fields.
            size = sub_colors.shape[0] * 4 * 7
            print('{:s} {:.1f} MB loaded in {:.1f}s'.format(kd_tree_file.split(path_separator)[-1], size * 1e-6,
                                                            time.time() - t0))

        print('\nPreparing reprojected indices for testing')

        # Validation and test share identical loading logic; only the
        # destination lists differ.
        if self.mode in ('validation', 'test'):
            if self.mode == 'validation':
                proj_list, label_list = self.val_proj, self.val_labels
            else:
                proj_list, label_list = self.test_proj, self.test_labels
            for file_path in self.data_list:
                t0 = time.time()
                cloud_name = file_path.split(path_separator)[-1][:-4]
                proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name))
                with open(proj_file, 'rb') as f:
                    proj_idx, labels = pickle.load(f)
                proj_list.append(proj_idx)
                label_list.append(labels)
                print('{:s} done in {:.1f}s'.format(cloud_name, time.time() - t0))

    def spatially_regular_gen(self, item):
        """Sample one fixed-size neighbourhood.

        `item` is required by the Dataset protocol but unused: the query is
        centred on the cloud/point with the lowest sampling possibility, so
        coverage is spatially regular rather than index-driven.
        """
        # Pick the least-sampled cloud, then its least-sampled point.
        cloud_idx = int(np.argmin(self.min_possibility))
        point_ind = np.argmin(self.possibility[cloud_idx])

        # Points are stored inside the KD-tree; view them without copying.
        points = np.array(self.input_trees[cloud_idx].data, copy=False)
        center_point = points[point_ind, :].reshape(1, -1)

        # Jitter the query centre so repeated passes see different patches.
        noise = np.random.normal(scale=cfg.noise_init / 10, size=center_point.shape)
        pick_point = center_point + noise.astype(center_point.dtype)

        # Query at most num_points neighbours (the whole cloud if smaller).
        k = min(len(points), cfg.num_points)
        queried_idx = self.input_trees[cloud_idx].query(pick_point, k=k)[1][0]
        queried_idx = DP.shuffle_idx(queried_idx)

        # Collect points and colors, re-centred on the query point.
        queried_pc_xyz = points[queried_idx] - pick_point
        queried_pc_colors = self.input_colors[cloud_idx][queried_idx]
        queried_pc_labels = self.input_labels[cloud_idx][queried_idx]

        # Raise the possibility of the sampled points (closer points get a
        # larger increase) so the next query moves elsewhere.
        dists = np.sum(np.square((points[queried_idx] - pick_point).astype(np.float32)), axis=1)
        delta = np.square(1 - dists / np.max(dists))
        self.possibility[cloud_idx][queried_idx] += delta
        self.min_possibility[cloud_idx] = float(np.min(self.possibility[cloud_idx]))

        # Pad up to num_points by duplication when the cloud is small.
        if len(points) < cfg.num_points:
            queried_pc_xyz, queried_pc_colors, queried_idx, queried_pc_labels = \
                DP.data_aug(queried_pc_xyz, queried_pc_colors, queried_pc_labels, queried_idx, cfg.num_points)

        return (queried_pc_xyz.astype(np.float32),
                queried_pc_colors.astype(np.float32),
                queried_pc_labels,
                queried_idx.astype(np.int32),
                np.array([cloud_idx], dtype=np.int32))

    def map(self, batch_xyz, batch_features, batch_labels, batch_pc_idx, batch_cloud_idx):
        """Precompute the per-layer neighbour/pool/upsample indices.

        Mirrors the original TF input pipeline (tf.py_func + knn_search) but
        runs eagerly on numpy arrays.  Returns a flat list:
        [xyz x L, neigh_idx x L, pool_idx x L, up_idx x L,
         features, labels, pc_idx, cloud_idx] for L = cfg.num_layers.
        """
        batch_features = np.concatenate((batch_xyz, batch_features), axis=-1)
        input_points = []
        input_neighbors = []
        input_pools = []
        input_up_samples = []

        for i in range(cfg.num_layers):
            neighbour_idx = DP.knn_search(batch_xyz, batch_xyz, cfg.k_n)
            # Taking a prefix is a random subset: the sampler already
            # shuffled point order via DP.shuffle_idx.
            n_sub = batch_xyz.shape[1] // cfg.sub_sampling_ratio[i]
            sub_points = batch_xyz[:, :n_sub, :]
            pool_i = neighbour_idx[:, :n_sub, :]
            up_i = DP.knn_search(sub_points, batch_xyz, 1)
            input_points.append(batch_xyz)
            input_neighbors.append(neighbour_idx)
            input_pools.append(pool_i)
            input_up_samples.append(up_i)
            batch_xyz = sub_points

        input_list = input_points + input_neighbors + input_pools + input_up_samples
        input_list += [batch_features, batch_labels, batch_pc_idx, batch_cloud_idx]

        return input_list

    def collate_fn(self, batch):
        """Stack per-sample tuples and build the tensor dict the network consumes."""
        columns = list(zip(*batch))
        batch_xyz = np.stack(columns[0])
        batch_features = np.stack(columns[1])
        batch_labels = np.stack(columns[2])
        batch_pc_idx = np.stack(columns[3])
        batch_cloud_idx = np.stack(columns[4])

        flat_inputs = self.map(batch_xyz, batch_features, batch_labels, batch_pc_idx, batch_cloud_idx)

        num_layers = cfg.num_layers
        inputs = {
            'xyz': [torch.from_numpy(a).float() for a in flat_inputs[:num_layers]],
            'neigh_idx': [torch.from_numpy(a).long() for a in flat_inputs[num_layers:2 * num_layers]],
            'sub_idx': [torch.from_numpy(a).long() for a in flat_inputs[2 * num_layers:3 * num_layers]],
            'interp_idx': [torch.from_numpy(a).long() for a in flat_inputs[3 * num_layers:4 * num_layers]],
            # Features go channel-first for the conv layers.
            'features': torch.from_numpy(flat_inputs[4 * num_layers]).transpose(1, 2).float(),
            'labels': torch.from_numpy(flat_inputs[4 * num_layers + 1]).long(),
            'input_inds': torch.from_numpy(flat_inputs[4 * num_layers + 2]).long(),
            'cloud_inds': torch.from_numpy(flat_inputs[4 * num_layers + 3]).long(),
        }
        return inputs
