from __future__  import print_function
import os
import json
import glob
import torch
import numpy as np
import torchvision.transforms as transforms
import torch.utils.data as data
from auxiliary.utils import *
from auxiliary.pc_utils import * 

import pickle
import os.path
import sys



class KITTIMultiObjectDataset(data.Dataset):
    """Multi-object KITTI scenes.

    Each sample stitches several single-object point clouds (pedestrians,
    cars, ...) into one synthetic scene of ``self.npoints`` points.  The
    scene composition (which objects, at which 2D coordinates) comes from
    pre-generated pickle files under ``<root>/kittimulobj/``.
    """

    def __init__(self, root='./data', num_points=2048, split='train'):
        """Load the scene-composition index and the per-object metadata.

        root: dataset root directory containing ``kittimulobj/``.
        num_points: number of points per assembled scene.
        split: split name; a split containing 'test' enables deterministic
            point sampling in ``__getitem__``.
        """
        # Scene-composition parameters: which objects go where, per example.
        with open(os.path.join(root, 'kittimulobj/kitti_mulobj_param_' + split.lower() + '_' + str(num_points) + '.pkl'), 'rb') as pickle_file:
            dict_dataset = pickle.load(pickle_file)
        # Per-object metadata (file path, class name, 3D box) of the base dataset.
        with open(os.path.join(root, 'kittimulobj', dict_dataset['base_dataset'] + '.pkl'), 'rb') as pickle_file:
            self.obj_dataset = pickle.load(pickle_file)
        self.name_dict = {'Pedestrian': 0, 'Car': 1, 'Cyclist': 2, 'Van': 3, 'Truck': 4}
        self.npoints = num_points
        self.scene_radius = dict_dataset['scene_radius']
        self.total = dict_dataset['total']
        self.num_data_batch = dict_dataset['num_batch']
        self.batch_num_model = dict_dataset['batch_num_model']
        self.batch_num_example = dict_dataset['batch_num_example']
        self.data_list = dict_dataset['list_example']
        self.datapath = os.path.join(root, 'kittimulobj/kitti_single')
        self.max_obj_num = np.max(self.batch_num_model)
        self.obj_type_num = len(self.name_dict)
        # Test splits fix the RNG seed in __getitem__ for reproducible sampling.
        self.test = 'test' in split.lower()

    def __getitem__(self, index):
        """Assemble scene `index` and return it as a (npoints, 3) float tensor."""
        # -1 marks unused slots: a scene may hold fewer than max_obj_num objects.
        label = np.full(self.max_obj_num, -1, dtype=int)
        point_set = []
        # Oversample per object so the concatenation can be cut to exactly npoints.
        num_points_each = int(np.ceil(self.npoints / len(self.data_list[index]['idx'])))
        for cnt, idx_obj in enumerate(self.data_list[index]['idx']):  # take out the models one-by-one
            # KITTI .bin layout: N x (x, y, z, reflectance) float32.
            obj_pc = np.fromfile(os.path.join(self.datapath, os.path.basename(self.obj_dataset[idx_obj]['path'])), dtype=np.float32).reshape(-1, 4)
            if self.test:
                np.random.seed(0)  # deterministic sampling for evaluation
            # Sample with replacement; keep xyz only, drop reflectance.
            obj_pc = obj_pc[np.random.choice(obj_pc.shape[0], num_points_each), 0:3]
            label[cnt] = self.name_dict[self.obj_dataset[idx_obj]['name']]
            obj_pc = self.pc_normalize(obj_pc, self.obj_dataset[idx_obj]['box3d_lidar'], self.obj_dataset[idx_obj]['name'])
            obj_pc[:, :2] += self.data_list[index]['coor'][cnt][:2]  # translate into scene position
            point_set.append(obj_pc)
        point_set = np.vstack(point_set)[:self.npoints, :]
        # NOTE: `label` is computed but intentionally not returned
        # (a "return point_set, label" variant existed historically).
        return torch.from_numpy(point_set.astype(np.float32))

    def __len__(self):
        return self.total

    def pc_normalize(self, pc, bbox, label):
        """Center `pc` (NxC) on its 3D box and scale by the box diagonal.

        pc: NxC point array (first 3 columns are xyz).
        bbox: box parameters; bbox[:3] is the center, bbox[3:6] the extents.
        label: object class name; person-sized classes are shrunk further.
        Returns a new NxC array; the input is not modified.
        """
        pc = pc - bbox[:3]  # copy rather than mutate the caller's array
        box_len = np.sqrt(bbox[3] ** 2 + bbox[4] ** 2 + bbox[5] ** 2)
        pc = pc / box_len * 2
        if label in ('Pedestrian', 'Cyclist'):
            pc /= 2  # shrink person-sized models to better emulate a driving scene
        return pc







class ShapeNet(data.Dataset):
    """ShapeNet point clouds, optionally paired with rendered views (SVR).

    Categories are read from ``./data/synsetoffset2category.txt``; each
    category keeps only the models that have both a rendering directory and
    a sampled ``.points.ply`` file, then is split 80/20 (train/test) by
    sorted file order.
    """

    def __init__(self, train=True, options=None):
        # NOTE(review): options is required despite the None default —
        # options.npoint is read unconditionally.
        rootimg = "./data/ShapeNet/ShapeNetRendering"
        rootpc = "./data/customShapeNet"
        class_choice = None            # None -> keep every category
        npoints = options.npoint
        normal = False                 # True -> keep (scaled) normals in columns 3:6
        balanced = False
        gen_view = False               # True -> always use view 0 at train time
        SVR = False                    # True -> __getitem__ also loads an image
        idx = 0                        # fixed view index used at validation time
        self.balanced = balanced
        self.normal = normal
        self.train = train
        self.rootimg = rootimg
        self.rootpc = rootpc
        self.npoints = npoints
        self.datapath = []
        self.catfile = os.path.join('./data/synsetoffset2category.txt')
        self.cat = {}
        self.meta = {}
        self.SVR = SVR
        self.gen_view = gen_view
        self.idx = idx
        # catfile lines: "<category name> <synset offset>"
        with open(self.catfile, 'r') as f:
            for line in f:
                ls = line.strip().split()
                self.cat[ls[0]] = ls[1]
        if class_choice is not None:
            self.cat = {k: v for k, v in self.cat.items() if k in class_choice}
        print(self.cat)
        empty = []
        for item in self.cat:
            dir_img = os.path.join(self.rootimg, self.cat[item])
            fns_img = sorted(os.listdir(dir_img))

            try:
                dir_point = os.path.join(self.rootpc, self.cat[item], 'ply')
                fns_pc = sorted(os.listdir(dir_point))
            except OSError:  # point-cloud directory missing for this category
                fns_pc = []
            # keep only models that have both a rendering dir and a point cloud
            fns = [val for val in fns_img if val + '.points.ply' in fns_pc]
            print('category ', self.cat[item], 'files ' + str(len(fns)), len(fns) / float(len(fns_img)), "%")
            # deterministic 80/20 split on the sorted file list
            if train:
                fns = fns[:int(len(fns) * 0.8)]
            else:
                fns = fns[int(len(fns) * 0.8):]

            if len(fns) != 0:
                self.meta[item] = []
                for fn in fns:
                    objpath = "./data/ShapeNetCorev2/" + self.cat[item] + "/" + fn + "/models/model_normalized.ply"
                    # tuple: (rendering dir, point-cloud path, category, mesh path, model id)
                    self.meta[item].append((os.path.join(dir_img, fn, "rendering"), os.path.join(dir_point, fn + '.points.ply'), item, objpath, fn))
            else:
                empty.append(item)
        for item in empty:
            del self.cat[item]
        self.idx2cat = {}
        self.size = {}
        i = 0
        for item in self.cat:
            self.idx2cat[i] = item
            self.size[i] = len(self.meta[item])
            i = i + 1
            for fn in self.meta[item]:
                self.datapath.append(fn)

        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])

        self.transforms = transforms.Compose([
            transforms.Resize(size=224, interpolation=2),  # 2 == bilinear
            transforms.ToTensor(),
            # normalize,
        ])

        # RandomResizedCrop or RandomCrop
        self.dataAugmentation = transforms.Compose([
            transforms.RandomCrop(127),
            transforms.RandomHorizontalFlip(),
        ])
        self.validating = transforms.Compose([
            transforms.CenterCrop(127),
        ])

        # per-category loss/metric accumulators for evaluation
        self.perCatValueMeter = {}
        for item in self.cat:
            self.perCatValueMeter[item] = AverageValueMeter()
        self.perCatValueMeter_metro = {}
        for item in self.cat:
            self.perCatValueMeter_metro[item] = AverageValueMeter()
        self.transformsb = transforms.Compose([
            transforms.Resize(size=224, interpolation=2),
        ])

    def __getitem__(self, index):
        """Return a contiguous (npoints, 3) float tensor — (npoints, 6) when
        ``self.normal`` is set."""
        fn = self.datapath[index]
        # Legacy sanity pass: parse the point count from the .ply header
        # (third line); the value is unused, this only surfaces malformed
        # files through the diagnostic prints.
        with open(fn[1]) as fp:
            for i, line in enumerate(fp):
                if i == 2:
                    try:
                        length = int(line.split()[2])
                    except ValueError:
                        print(fn)
                        print(line)
                    break
        # Retry loop: works around an intermittent loading error that was
        # never tracked down; brute-force re-reads up to 15 times.
        for i in range(15):
            try:
                mystring = my_get_n_random_lines(fn[1], n=self.npoints)
                point_set = np.loadtxt(mystring).astype(np.float32)
                break
            except ValueError as excep:
                print(fn)
                print(excep)

        if not self.normal:
            point_set = point_set[:, 0:3]
        else:
            # down-weight the normal components relative to xyz
            point_set[:, 3:6] = 0.1 * point_set[:, 3:6]

        point_set = torch.from_numpy(point_set)

        # load image (only for single-view reconstruction)
        if self.SVR:
            if self.train:
                # rendering dir holds the views plus 3 metadata entries
                N_tot = len(os.listdir(fn[0])) - 3
                if N_tot == 1:
                    print("only one view in ", fn)
                if self.gen_view or N_tot <= 1:
                    # randint(1, 1) would raise ValueError: fall back to view 0
                    N = 0
                else:
                    N = np.random.randint(1, N_tot)
                if N < 10:
                    im = Image.open(os.path.join(fn[0], "0" + str(N) + ".png"))
                else:
                    im = Image.open(os.path.join(fn[0], str(N) + ".png"))
                im = self.dataAugmentation(im)  # random crop
            else:
                if self.idx < 10:
                    im = Image.open(os.path.join(fn[0], "0" + str(self.idx) + ".png"))
                else:
                    im = Image.open(os.path.join(fn[0], str(self.idx) + ".png"))
                im = self.validating(im)  # center crop
            data = self.transforms(im)  # scale
            data = data[:3, :, :]  # drop alpha channel if present
        else:
            data = 0
        # NOTE: `data` is prepared but only the point set is returned.
        return point_set.contiguous()

    def __len__(self):
        return len(self.datapath)

class DATASET_LIST:
    """Registry mapping dataset names to their Dataset classes."""

    def __init__(self):
        # todo add KIMO
        self.datasets = {"shapenet": ShapeNet, "KIMO": KITTIMultiObjectDataset}
        self.type = self.datasets.keys()

    def load(self, training, options):
        """Build the dataset named by options.dataset.

        training selects the train vs. validation/test variant; options
        supplies npoint (and scale for KIMO).  Returns the dataset instance.
        """
        header = "\nTRAINING DATASET:" if training else "VALIDATION DATASET:"
        print(header)
        factory = self.datasets[options.dataset]
        if options.dataset == 'KIMO':
            # KIMO takes a split string encoding both phase and scale.
            prefix = 'train_' if training else 'test_'
            dataset = factory(num_points=options.npoint, split=prefix + options.scale)
        else:
            dataset = factory(training, options)
        print("\n")
        return dataset
