import argparse
import os, sys
import shutil
import time
import numpy as np
from scipy.spatial import cKDTree
import dill
import cv2
import pandas
import open3d
from matplotlib import pyplot as plt
# from pycaret.regression import *

def denoise_float32(array):
    """Smooth a float32 image with a 3x3 Gaussian-style binomial kernel.

    The kernel is the separable [1, 2, 1]/4 filter applied in both axes,
    i.e. the classic 3x3 Gaussian approximation.
    """
    row = np.array([0.25, 0.5, 0.25], dtype=np.float32)
    # Outer product of the 1-D binomial taps yields exactly
    # [[0.0625, 0.125, 0.0625], [0.125, 0.25, 0.125], [0.0625, 0.125, 0.0625]].
    smoothing_kernel = np.outer(row, row)
    return cv2.filter2D(array, -1, smoothing_kernel)

def polar_filter(data, angular_resolution, fieldname='intensity', filter2=None, save_to=None, only_range_image=False):
    """Bin a polar point cloud into an (hangle, vangle) grid, averaging per cell.

    Parameters:
        data: pandas.DataFrame with 'hangle', 'vangle', 'dist' and *fieldname*
            columns; angles are in degrees.
        angular_resolution: grid cell size in degrees.
        fieldname: column averaged into the second image channel.
        filter2: optional callable applied to the 2-channel range image.
        save_to: optional path; saves the field channel as a rainbow image.
        only_range_image: when True, return the raw (width, height, 2) image.

    Returns:
        Either the range image (only_range_image=True), or a tuple of
        (DataFrame with one row per occupied cell, columns
        ['hangle', 'vangle', 'dist', fieldname]; RMS of the field channel
        computed over the whole grid, empty cells included).
    """
    width = int(360 / angular_resolution)
    height = int(180 / angular_resolution)
    # Channel 0 accumulates distance, channel 1 the requested field.
    range_image = np.zeros((width, height, 2), dtype=np.float32)
    count = np.zeros((width, height), dtype=np.int32)
    h = np.mod(np.int32(data['hangle'] / angular_resolution), width)
    v = np.mod(np.int32(data['vangle'] / angular_resolution), height)
    dists = data['dist'].to_numpy()
    fields = data[fieldname].to_numpy()
    # Unbuffered scatter-add (np.add.at) handles repeated (h, v) indices
    # correctly and replaces the original per-point Python loop.
    np.add.at(range_image[:, :, 0], (h, v), dists)
    np.add.at(range_image[:, :, 1], (h, v), fields)
    np.add.at(count, (h, v), 1)

    # Convert per-cell sums into means where cells are occupied.
    count_idx = count.nonzero()
    for channel in range(range_image.shape[2]):
        range_image[count_idx[0], count_idx[1], channel] = range_image[count_idx[0], count_idx[1], channel] / count[count_idx]

    if filter2 is not None:
        range_image = filter2(range_image)

    if only_range_image:
        return range_image

    if save_to is not None:
        plt.imsave(save_to, range_image[:, :, 1].transpose(), cmap='rainbow', vmin=-10, vmax=10)

    # One row per occupied cell: horizontal angle (deg), vertical angle (deg),
    # mean distance, mean field value.
    polar = np.array(list(count_idx)).transpose()
    filtered_data = np.zeros((polar.shape[0], 4), dtype=np.float32)
    filtered_data[:, [0, 1]] = polar * angular_resolution
    filtered_data[:, 2] = range_image[polar[:, 0], polar[:, 1], 0]
    filtered_data[:, 3] = range_image[polar[:, 0], polar[:, 1], 1]
    # RMS of the field channel over all cells (including empty ones) is used
    # by the driver as a flatness/noise measure.
    std = np.sqrt(np.square(range_image[:, :, 1]).mean())
    return pandas.DataFrame(filtered_data, columns=['hangle', 'vangle', 'dist', fieldname]), std

def transformSPH2XYZ(data):
    """Append Cartesian 'x', 'y', 'z' columns derived from spherical coordinates.

    'hangle' is the horizontal rotation angle and 'vangle' the vertical one,
    both in degrees; 'dist' is the radius.  Mutates *data* in place and
    returns it.
    """
    azimuth = np.deg2rad(data['hangle'])   # horizontal rotation angle
    polar = np.deg2rad(data['vangle'])     # vertical rotation angle
    radius = data['dist']
    data.loc[:, 'x'] = radius * np.sin(polar) * np.cos(azimuth)
    data.loc[:, 'y'] = -radius * np.sin(polar) * np.sin(azimuth)
    data.loc[:, 'z'] = radius * np.cos(polar)
    return data

class Calibrator:
    """Learns and applies a per-bin range-offset correction for a polar scanner.

    Training accumulates observed plane offsets into a sparse 3-D histogram
    indexed by (vertical angle, cosine of incidence angle, squared inverse
    distance).  build() densifies the histogram by interpolation (scipy or
    naturalneighbor) or fits a pycaret regression model; calibrate() then
    looks up one correction per point and adds it to the measured distance.
    """

    def __init__(self, angle_res=2, cos_in_res=0.01, inv_dist_res=0.05, min_dist=100, max_dist=10000, max_offset=5, method='nn', align_angle=180) -> None:        
        # Offsets above this magnitude are treated as outliers and skipped.
        self.__max_offset = max_offset
        # Incidence angles beyond 75 degrees are considered unreliable.
        self.__min_cos_in = np.cos(np.pi * 75 / 180)
        # vangle is shifted by +180 in __compute_index, hence a 360-degree span.
        self.__num_vangle = int(360 / angle_res + 1)
        self.__num_cos_in = int((1 - self.__min_cos_in) / cos_in_res + 1)
        # self.__num_iangle = int(90 / angle_res + 1)
        self.__min_dist = min_dist
        self.__inv_max_dist = 0#int(np.square(min_dist / max_dist) / inv_dist_res)
        self.__num_dist = int(1 / inv_dist_res - self.__inv_max_dist + 1)
        self.__angle_resolution = angle_res
        self.__cos_in_resolutin = cos_in_res
        self.__inv_dist_resolution = inv_dist_res
        
        # self.__coeff = np.zeros((self.__num_vangle, self.__num_cos_in), dtype=np.float32)
        # self.__train_count = np.zeros((self.__num_vangle, self.__num_cos_in), dtype=np.uint32)
        # __dirty marks that new training data arrived since the last build().
        self.__dirty = True        
        self.__training_method = method
        if self.__training_method.startswith('pycaret'):
            #self.__top3 = None
            # self.__train_data = None
            self.__best_model = None
        # else:
        # Per-bin offset sums and sample counts; their element-wise ratio is
        # the mean observed offset for that bin.
        self.__coeff = np.zeros((self.__num_vangle, self.__num_cos_in, self.__num_dist), dtype=np.float32)
        self.__train_count = np.zeros((self.__num_vangle, self.__num_cos_in, self.__num_dist), dtype=np.uint32)
        # Running sum and count so the align_angle property can average over
        # several interactively aligned scans.
        self.__align_angle = align_angle
        self.__align_angle_count = 0

    def compute_align_angle(self, input):
        """Interactively find the hangle shift aligning the two hemispheres.

        Renders the negative- and positive-vangle halves of *input* as
        intensity range images into the red/green channels of an OpenCV
        window; a trackbar (driven by the a/d or arrow keys) shifts the
        negative image until the operator judges the overlay aligned.  The
        chosen angle is accumulated into the running average exposed by the
        align_angle property and also returned.
        """
        # if self.__align_angle > 0 : return self.align_angle
        negative_cloud = input[input['vangle'] < 0]
        positive_cloud = input[input['vangle'] >= 0]
        filter_resolution = 0.2        
        # Mirror the negative hemisphere to positive vangles for binning,
        # then restore it afterwards.
        negative_cloud.loc[:, 'vangle'] = -negative_cloud['vangle']
        negative_image = polar_filter(negative_cloud, angular_resolution=filter_resolution, only_range_image=True)
        negative_cloud.loc[:, 'vangle'] = -negative_cloud['vangle']
        negative_image = np.uint8(negative_image[:, :, 1] * 255 / negative_image[:, :, 1].max())
        negative_image = negative_image.transpose()
        positive_image = polar_filter(positive_cloud, angular_resolution=filter_resolution, only_range_image=True)
        positive_image = np.uint8(positive_image[:, :, 1] * 255 / positive_image[:, :, 1].max())
        positive_image = positive_image.transpose()
        def on_angle_bias_changed(bias):
            # Trackbar callback: red = negative image rotated by the candidate
            # angle, green = positive image, blue = their average.
            diff = np.zeros((negative_image.shape[0], negative_image.shape[1], 3), dtype=np.uint8)
            # The search window starts at 175 degrees; bias is in grid bins.
            start = bias + int(175 / filter_resolution)
            diff[:, start:, 0] = negative_image[:, :diff.shape[1] - start]
            diff[:, :start, 0] = negative_image[:, diff.shape[1] - start:]
            diff[:, :, 1] = positive_image
            diff[:, :, 2] = diff[:, :, 0] // 2 + diff[:, :, 1] // 2
            align_angle = 175 + bias * filter_resolution
            cv2.putText(diff, '%.1f' % align_angle, (10, diff.shape[0] - 10), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255))
            cv2.imshow('diff', diff)

        cv2.namedWindow('diff')
        cv2.createTrackbar('angle bias', 'diff', 0, int(10 / filter_resolution) + 1, on_angle_bias_changed)
        cv2.setTrackbarPos('angle bias', 'diff', int((self.align_angle - 175) / filter_resolution))
        while(True):
            key = cv2.waitKey()
            # 'd' (100) / right arrow (83): nudge right; 'a' (97) / left
            # arrow (81): nudge left; any other key accepts the alignment.
            if key == 100 or key == 83:
                bias = cv2.getTrackbarPos('angle bias', 'diff')
                cv2.setTrackbarPos('angle bias', 'diff', bias + 1)
            elif key == 97 or key == 81:
                bias = cv2.getTrackbarPos('angle bias', 'diff')
                cv2.setTrackbarPos('angle bias', 'diff', bias - 1)
            else:
                break
        
        angle = 175 + cv2.getTrackbarPos('angle bias', 'diff') * filter_resolution
        cv2.destroyAllWindows()
        self.__align_angle = self.__align_angle + angle
        self.__align_angle_count = self.__align_angle_count + 1
        return angle

    @property
    def align_angle(self):
        """Average of all interactively chosen align angles, or the initial
        constructor value before any have been computed."""
        if self.__align_angle_count == 0: return self.__align_angle
        return self.__align_angle / self.__align_angle_count

    @property
    def angle_resolution(self):
        """Histogram bin size for angles, in degrees."""
        return self.__angle_resolution

    @property
    def cos_in_resolution(self):
        """Histogram bin size for the cosine of the incidence angle."""
        return self.__cos_in_resolutin

    @property
    def inv_dist_resolution(self):
        """Histogram bin size for the squared inverse distance."""
        return self.__inv_dist_resolution

    @property
    def training_method(self):
        """Interpolation/model backend: 'scipy', 'nn', 'pycaret', 'pycaret-gpu'."""
        return self.__training_method

    @classmethod
    def load(cls, path):
        """Deserialize a Calibrator previously saved by dump().

        For pycaret-based calibrators the fitted model is stored in a
        separate .pkl next to the dill file and reloaded here.
        """
        with open(path, 'rb') as f:
            print('loading calibration %s' % path)
            c = dill.load(f)
            if c.__training_method.startswith('pycaret'):
                dirname = os.path.dirname(path)
                model_name = c.__training_method
                model_path = '%s/%s.pkl' % (dirname, model_name)
                if os.path.exists(model_path):
                    print('loading pycaret model %s' % model_path)
                    # load_model expects the path without the .pkl suffix.
                    model_path = '%s/%s' % (dirname, model_name)                    
                    # NOTE(review): load_model comes from pycaret.regression,
                    # whose import is commented out at the top of this file —
                    # as the file stands this line raises NameError.
                    c.__best_model = load_model(model_path)
            return c        

    def dump(self, path):
        """Serialize this Calibrator to *path* with dill.

        A pycaret model cannot be dill-pickled along with the instance, so it
        is saved separately via save_model, temporarily detached from self,
        and re-attached after pickling.
        """
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        best_model = None
        if self.__training_method.startswith('pycaret'):
            model_name = self.__training_method            
            # NOTE(review): save_model comes from pycaret.regression, whose
            # import is commented out at the top of this file — this raises
            # NameError as the file stands.
            save_model(self.__best_model, model_name, model_only=False)
            # save_model writes '<name>.pkl' into the working directory;
            # move it next to the calibration file.
            model_name = '%s.pkl' % self.__training_method
            assert os.path.exists(model_name)
            model_path = '%s/%s' % (dirname, model_name)
            if os.path.exists(model_path):
                os.remove(model_path)

            os.rename(model_name, model_path)            
            print('model saved to %s' % model_path)
            assert os.path.exists(model_path)
            best_model = self.__best_model
            self.__best_model = None
        with open(path, 'wb') as f:
            dill.dump(self, f)

        if best_model is not None:
            self.__best_model = best_model

    def __compute_index(self, data):
        """Map each point's (vangle, cos_in, dist) to histogram bin indices.

        Returns three int32 arrays: vangle bins (shifted by +180 so the
        index is non-negative), cos_in bins, and squared-inverse-distance
        bins.  NOTE: mutates *data* by clamping dist values below 1 to 1.
        """
        vangles = np.int32((data['vangle'].to_numpy() + 180) / self.__angle_resolution)
        cos_ins = np.int32((data['cos_in'].to_numpy() - self.__min_cos_in) / self.__cos_in_resolutin)
        # iangles = np.int32(data['iangle'].to_numpy() / self.__angle_resolution)
        # assert (iangles < 0).sum() == 0
        data.loc[data['dist'] < 1, 'dist'] = 1
        inv_dists = np.int32((np.square(self.__min_dist / data['dist'].to_numpy())) / self.__inv_dist_resolution) - self.__inv_max_dist
        return vangles, cos_ins, inv_dists
        # return vangles, iangles, inv_dists
        # return vangles, cos_ins

    def train(self, data):
        """Accumulate one scan's plane offsets into the per-bin histogram.

        Points whose distance bin is out of range, whose cos_in bin is
        negative, or whose offset exceeds __max_offset are skipped.
        """
        # NOTE(review): 'f' is not defined in this method — it relies on the
        # module-level loop variable of the same name in the __main__ block.
        print('training %s ...' % f)
        start_time = time.time()
        if not self.__dirty:
            self.__dirty = True

        # if self.__training_method.startswith('pycaret'):
        #         if self.__train_data is None:
        #             self.__train_data = data[['vangle', 'cos_in', 'dist', 'offset']]
        #         else:
        #             self.__train_data = self.__train_data.append(data[['vangle', 'cos_in', 'dist', 'offset']])
        # else:
        vangles, cos_ins, inv_dists = self.__compute_index(data)
        # vangles, cos_ins = self.__compute_index(data)
        offset = data['offset'].to_numpy()
        for i in range(data.shape[0]):
            if inv_dists[i] >= self.__num_dist: continue
            if inv_dists[i] < 0: continue
            if offset[i] > self.__max_offset: continue #ignore offset too large
            if cos_ins[i] < 0: continue
            self.__coeff[vangles[i], cos_ins[i], inv_dists[i]] = self.__coeff[vangles[i], cos_ins[i], inv_dists[i]] + offset[i]
            self.__train_count[vangles[i], cos_ins[i], inv_dists[i]] = self.__train_count[vangles[i], cos_ins[i], inv_dists[i]] + 1
            # self.__coeff[vangles[i], cos_ins[i]] = self.__coeff[vangles[i], cos_ins[i]] + offset[i]
            # self.__train_count[vangles[i], cos_ins[i]] = self.__train_count[vangles[i], cos_ins[i]] + 1

        print('%s trained, current data coverage is %.1f%%.' % (f, self.coverage() * 100))
        end_time = time.time()
        print('training used %.1f minutes.' % ((end_time - start_time) / 60))

    def build(self):
        """Densify the accumulated histogram into the lookup structure.

        Runs only when new training data has arrived (__dirty).  Per-bin mean
        offsets are divided by the bin-center cos_in value, then either
        interpolated over the full grid (scipy / naturalneighbor) into
        __grid, or used to fit a pycaret regression model.
        """
        if self.__dirty:
            start_time = time.time()
            print('building training data ...')
            # if self.__training_method.startswith('pycaret'):                
            #     data = self.__train_data
                
            #     train_data = data.loc[data['offset'] < self.__max_offset, ['vangle', 'cos_in', 'dist', 'offset']]              
            #     # train_data = train_data[::1000]
            #     # train_data.loc[:, 'inv_dist2'] = np.square(self.__min_dist / train_data['dist'].to_numpy())
            #     train_data.loc[:, 'offset'] = train_data['offset'] / train_data['cos_in']
            #     # train_data = train_data.drop(columns=['dist'])
            #     print('train features: %s' % train_data.columns)
            #     use_gpu = self.__training_method.endswith('gpu')
            #     # train_data = setup(data=train_data, target='offset', numeric_features=['vangle', 'cos_in', 'inv_dist2'],
            #     train_data = setup(data=train_data, target='offset', numeric_features=['vangle', 'cos_in', 'dist'],
            #         silent=True, pca=True, normalize=True, normalize_method='minmax', use_gpu=use_gpu, verbose=False)

            #     top3 = compare_models(n_select=3, budget_time=30)
            #     top3 = [finalize_model(model) for model in top3]
            #     top3 = [tune_model(model, verbose=False) for model in top3]
            #     # blender = blend_models(top3)
            #     # stacker = stack_models(top3)
            #     self.__best_model = automl()                
            #     print('best model is %s' % self.__best_model)
            # else:           
            idx = self.__train_count.nonzero()
            print('interpolating %s points using %s ...' % (len(idx[0]), self.__training_method))
            points = np.zeros((idx[0].shape[0], 3), dtype=np.uint32)            
            # NOTE(review): len(idx) is 3 (the index tuple length), so this
            # zeros array is dead code — it is rebound by the expression below.
            values = np.zeros(len(idx), dtype=np.float32)
            points[:, 0] = idx[0]
            points[:, 1] = idx[1]
            points[:, 2] = idx[2]
            # Mean offset per bin, normalized by the bin-center incidence cosine.
            values = (self.__coeff[idx] / self.__train_count[idx] / (idx[1] * self.__cos_in_resolutin + self.__min_cos_in))
            values[values > self.__max_offset] = 0
            if self.__training_method == 'scipy':
                from scipy.interpolate import LinearNDInterpolator, griddata
                interp = LinearNDInterpolator(points, values, fill_value=0)
                vangles, cos_ins, dists = np.meshgrid(np.arange(0, self.__num_vangle, 1),
                    np.arange(0, self.__num_cos_in, 1), np.arange(0, self.__num_dist, 1), indexing='ij')
                self.__grid = interp(vangles, cos_ins, dists)
                self.__grid[np.isnan(self.__grid)] = 0
                # vangles, cos_ins= np.meshgrid(np.arange(0, self.__num_vangle, 1), np.arange(0, self.__num_cos_in, 1), indexing='ij')
                # self.__grid = interp(vangles, cos_ins)
            elif self.__training_method == 'nn':
                # Third-party natural-neighbor interpolation over the dense grid.
                import naturalneighbor
                ranges = [[0, self.__num_vangle, 1], [0, self.__num_cos_in, 1], [0, self.__num_dist, 1]]
                self.__grid = naturalneighbor.griddata(points, values, ranges)
                self.__grid[np.isnan(self.__grid)] = 0
            elif self.__training_method == 'pycaret':
                # NOTE(review): setup / compare_models / finalize_model /
                # tune_model / automl come from pycaret.regression, whose
                # import is commented out at the top of this file — this
                # branch raises NameError as the file stands.  Also note that
                # use_gpu is always False here, since this branch only matches
                # the exact string 'pycaret' (never 'pycaret-gpu').
                train_data = pandas.DataFrame(points, columns=['vangle', 'cos_in', 'inv_dist2'])  
                train_data['offset'] = values
                # train_data = train_data[::1000]
                # train_data.loc[:, 'inv_dist2'] = np.square(self.__min_dist / train_data['dist'].to_numpy())
                train_data.loc[:, 'offset'] = train_data['offset'] / train_data['cos_in']
                # train_data = train_data.drop(columns=['dist'])
                print('train features: %s' % train_data.columns)
                use_gpu = self.__training_method.endswith('gpu')
                # train_data = setup(data=train_data, target='offset', numeric_features=['vangle', 'cos_in', 'inv_dist2'],
                train_data = setup(data=train_data, target='offset', numeric_features=['vangle', 'cos_in', 'inv_dist2'],
                    silent=True, pca=True, normalize=True, normalize_method='minmax', use_gpu=use_gpu, verbose=False)

                top3 = compare_models(n_select=3, budget_time=30)
                top3 = [finalize_model(model) for model in top3]
                top3 = [tune_model(model, verbose=False) for model in top3]
                # blender = blend_models(top3)
                # stacker = stack_models(top3)
                self.__best_model = automl()                
                print('best model is %s' % self.__best_model)
            
            self.__dirty = False
            end_time = time.time()
            print('training used %.1f minutes.' % ((end_time - start_time) / 60))
            print('data coverage is %.1f%%.' % (self.coverage() * 100))
            print('best align angle is %.1f' % (self.align_angle))
            print('train completed.')

    def show_model(self):
        """Visualize the learned correction.

        Uses pycaret's plot_model for model-based calibrators (NOTE(review):
        plot_model is also an unimported pycaret name), otherwise renders one
        distance slice of the interpolated grid as a 3-D surface.
        """
        if self.__training_method.startswith('pycaret'):
            plot_model(self.__best_model)
        else:
            from mpl_toolkits.mplot3d import axes3d, Axes3D
            fig = plt.figure(figsize=(15, 15))
            ax = Axes3D(fig)
            # Make data.
            X = np.arange(0, self.__num_cos_in, 1)
            Y = np.arange(0, self.__num_vangle, 1)
            X, Y = np.meshgrid(X, Y)
            # Slice index 1 of the distance axis is shown.
            Z = self.__grid[:, :, 1].reshape((self.__num_vangle, self.__num_cos_in))

            # Plot the surface.
            surf = ax.plot_surface(X, Y, Z, cmap='rainbow', vmin=-10, vmax=10)
            plt.show()


    def coverage(self):
        """Fraction of histogram bins that received at least one sample
        (always 0 for pycaret-based calibrators)."""
        if self.__training_method.startswith('pycaret'): return 0
        return (self.__train_count > 0).sum() / self.__train_count.size

    def calibrate(self, data):
        """Return corrected distances for *data* (dist plus learned offset).

        Builds the lookup structure first if needed.  For grid-based methods,
        points whose bins fall outside the trained ranges receive a zero
        correction; NaN corrections are also zeroed.  For pycaret methods the
        model is fed the same bin indices it was trained on in build().
        """
        self.build()
        vangles, cos_ins, inv_dists = self.__compute_index(data)
        if self.__training_method.startswith('pycaret'):
            print('using model %s ...' % self.__best_model)
            # data.loc[:, 'inv_dist2'] = np.square(self.__min_dist / data['dist'].to_numpy())
            # coeff = predict_model(self.__best_model, data=data[['vangle', 'cos_in', 'inv_dist2']], verbose=False)['Label']
            df = pandas.DataFrame()
            df['vangle'] = vangles
            df['cos_in'] = cos_ins
            df['inv_dist2'] = inv_dists
            coeff = predict_model(self.__best_model, data=df, verbose=False)['Label']
        else:
            
            # vangles, cos_ins = self.__compute_index(data)
            # Boolean product keeps only points whose bins lie inside the grid.
            points_to_calib = (inv_dists >= 0) * (inv_dists < self.__num_dist) * (cos_ins >= 0)
            coeff = np.zeros(vangles.shape[0])
            coeff[points_to_calib] = self.__grid[vangles[points_to_calib], cos_ins[points_to_calib], inv_dists[points_to_calib]]
            # coeff = self.__grid[vangles, cos_ins]
        # coeff = data['offset'] / data['cos_in']
        coeff[np.isnan(coeff)] = 0
        
        return data['dist'] + coeff

def segment_all_planes(data):
    """Iteratively extract planes from a point cloud via RANSAC.

    Repeatedly fits a plane to the remaining points and removes its inliers
    until fewer than 5% of the original points remain.  Each plane is flipped
    so its d coefficient is non-negative.  Returns an (n, 4) array of plane
    coefficients [a, b, c, d].
    """
    min_ransac_n = data.shape[0] // 20
    cloud = open3d.geometry.PointCloud()
    cloud.points = open3d.utility.Vector3dVector(data[['x', 'y', 'z']].to_numpy())
    planes = []
    while len(cloud.points) > min_ransac_n:
        try:
            model, inlier_indices = cloud.segment_plane(
                distance_threshold=40,
                ransac_n=min_ransac_n,
                num_iterations=500)
            if model is None:
                continue

            # Normalize orientation so that d >= 0.
            if model[3] < 0:
                model = -model
            planes.append(model)
            # Drop this plane's inliers and search the remainder.
            cloud = cloud.select_by_index(inlier_indices, invert=True)
        except Exception as err:
            print(err)
            break

    return np.array(planes)


def magnitude(data):
    """Return the Euclidean length of vectors along the last axis of *data*."""
    squared = np.square(data)
    return np.sqrt(squared.sum(axis=-1))

def compute_incidence(cloud, planes):
    """Attach to every point the cosine of its laser incidence angle and its
    signed offset from the nearest fitted plane.

    For each point the plane with the smallest absolute offset wins; points
    claimed by no plane keep the defaults cos_in=1, offset=100.  Returns a
    DataFrame with columns ['cos_in', 'offset'].
    """
    points = cloud[['x', 'y', 'z']].to_numpy()
    lengths = magnitude(points)
    # Unit rays pointing from each point back toward the sensor origin.
    rays = -points / lengths[:, None]
    incidence = np.zeros((points.shape[0], 2), dtype=np.float32)
    incidence[:, 0] = 1    # default cos_in when no plane claims the point
    incidence[:, 1] = 100  # sentinel offset, replaced by any closer plane
    for plane in planes:
        offsets = np.dot(points, plane[:3]) + plane[3]
        closer = np.abs(offsets) < np.abs(incidence[:, 1])
        if not closer.any():
            continue
        incidence[closer, 1] = offsets[closer]
        incidence[closer, 0] = np.dot(rays[closer, :], plane[:3])

    # incidence[:, 1] = incidence[:, 1] / incidence[:, 0]
    # incidence[:, 0] = np.rad2deg(np.arccos(incidence[:, 0]))
    return pandas.DataFrame(incidence, columns=['cos_in', 'offset'])

def preprocess(input, args, keep_all=False):    
    """Split a scan into hemispheres, fit planes, and attach incidence data.

    The cloud is split at vangle 0; each half is binned with polar_filter
    (optionally denoised when training), converted to Cartesian, and fed to
    RANSAC plane segmentation.  Every returned point gets 'cos_in' (cosine of
    the incidence angle) and 'offset' (signed distance to its nearest plane)
    columns.  With keep_all=True the full-resolution points are returned,
    otherwise only the filtered per-cell points.

    NOTE(review): 'filter' and 'input' shadow builtins; the hemisphere frames
    are row-slices of *input*, so the .loc writes below may trigger pandas
    SettingWithCopy warnings; DataFrame.append requires pandas < 2.0.
    """
    negative_cloud = input[input['vangle'] < 0]
    positive_cloud = input[input['vangle'] >= 0]
    filter = None
    if args.train and args.denoise: filter = denoise_float32
    
    filter_resolution = args.angle_resolution
    # Mirror the negative hemisphere to positive vangles for binning, then
    # flip both the source cloud and the filtered result back.
    negative_cloud.loc[:, 'vangle'] = -negative_cloud['vangle']
    filtered_negative_cloud, _ = polar_filter(negative_cloud, angular_resolution=filter_resolution, filter2=filter)    
    negative_cloud.loc[:, 'vangle'] = -negative_cloud['vangle']
    filtered_negative_cloud.loc[:, 'vangle'] = -filtered_negative_cloud['vangle']
    filtered_negative_cloud = transformSPH2XYZ(filtered_negative_cloud)
    negative_planes = segment_all_planes(filtered_negative_cloud)    

    filtered_positive_cloud, _ = polar_filter(positive_cloud, angular_resolution=filter_resolution, filter2=filter)
    filtered_positive_cloud = transformSPH2XYZ(filtered_positive_cloud)
    positive_planes = segment_all_planes(filtered_positive_cloud)

    if keep_all:
        # Evaluate every raw point against the planes fitted on the
        # filtered (downsampled) clouds.
        negative_cloud = transformSPH2XYZ(negative_cloud)
        negative_incidence = compute_incidence(negative_cloud, negative_planes)
        positive_cloud = transformSPH2XYZ(positive_cloud)
        positive_incidence = compute_incidence(positive_cloud, positive_planes)        
        all = negative_cloud.append(positive_cloud)
    else:
        negative_incidence = compute_incidence(filtered_negative_cloud, negative_planes)
        positive_incidence = compute_incidence(filtered_positive_cloud, positive_planes)
        all = filtered_negative_cloud.append(filtered_positive_cloud)
    
    all_incidence = negative_incidence.append(positive_incidence)
    # Row order matches 'all' (negative hemisphere first), so positional
    # assignment via to_numpy() lines up correctly.
    all.loc[:, 'cos_in'] = all_incidence['cos_in'].to_numpy()
    all.loc[:, 'offset'] = all_incidence['offset'].to_numpy()
    return all
    
if __name__ == '__main__':
    # Command-line front end: either trains a Calibrator from scan data (-t)
    # or applies an existing calibration to scans and measures improvement.
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', default='./data/train', help='folder of input data')
    parser.add_argument('-o', '--output', default='./data/test/output', help='folder for calibration output')
    # NOTE(review): default=True combined with store_true means this flag is
    # effectively always on; it cannot be disabled from the command line.
    parser.add_argument('-s', '--saveoffset', default=True, action='store_true', help='save range images of offset or not')
    parser.add_argument('-c', '--calibration', default='./data/train/calibration.pycaret', help='path prefix for calibration file (output if training or input if calibration)')
    parser.add_argument('-t', '--train', default=False, action='store_true', help='run for training or calibrating')    
    parser.add_argument('-a', '--append', default=False, action='store_true', help='append to existed training data or create new one')
    parser.add_argument('-b', '--bias', default=180)
    parser.add_argument('-d', '--denoise', default=False, action='store_true')
    # NOTE(review): no type=float is given, so a value passed on the command
    # line arrives as str (only the default stays float); bias is cast below
    # but angle_resolution is not — confirm downstream arithmetic.
    parser.add_argument('-r', '--angle_resolution', default=0.5)
    parser.add_argument('-m', '--method', default='pycaret', help='available methods: scipy, nn, pycaret, pycaret-gpu')    
    args = parser.parse_args()
    args.bias = float(args.bias)
    # args.calibration = '%s.%s' % (args.calibration, args.method)

    if not args.train:
        print('start calibrating...')
        if os.path.exists(args.calibration):
            calibrator = Calibrator.load(args.calibration)
        else:
            # calibrator = Calibrator(method=args.method)
            exit('Error: no trained data for calibrating.')

        # The output folder name encodes the calibration settings so runs
        # with different resolutions do not overwrite each other.
        if calibrator.training_method.startswith('pycaret'):
            args.output = '%s-%s' % (args.output, calibrator.training_method)
        else:
            args.output = '%s-d2-%.f-%.3f-%.2f' % (args.output, calibrator.angle_resolution, calibrator.cos_in_resolution, calibrator.inv_dist_resolution)
        precab_output = '%s/precab' % args.output
        postcab_output = '%s/postcab' % args.output
        # Start each calibration run from a clean output tree.
        if os.path.exists(args.output):
            shutil.rmtree(args.output)

        print('make output folder %s' % args.output)
        os.makedirs(args.output)
        os.makedirs(precab_output)         
        os.makedirs(postcab_output)
    else:
        print('start training with %s ...' % args.method)
        if os.path.exists(args.calibration):
            if args.append:
                calibrator = Calibrator.load(args.calibration)
                print('data coverage of last train is %.1f%%, start to append new data...' % (calibrator.coverage() * 100))
            else:
                # Retrain from scratch: discard the previous calibration file.
                os.remove(args.calibration)
                calibrator = Calibrator(method=args.method, align_angle=args.bias)
        else:
            calibrator = Calibrator(method=args.method, align_angle=args.bias)
            # args.bias = True # force compute angle bias for first training
                
        # args.angle_resolution = calibrator.angle_resolution

    # Discover input scans: prefer raw CSVs (converted to HDF5 below), and
    # fall back to already-converted .hdf files when no CSVs exist.
    files = [f for f in os.listdir(args.input) if os.path.splitext(f)[-1] == '.csv']
    if len(files) == 0:
        files = [f for f in os.listdir(args.input) if os.path.splitext(f)[-1] == '.hdf']
        if len(files) == 0:
            print('no scan data found.')
            exit(-1)
    else:
        for f in files:
            path = '%s/%s' % (args.input, f)
            # The .hdf suffix is appended to the full CSV name, so 'scan.csv'
            # is cached as 'scan.csv.hdf'.
            hdf_path = '%s/%s.hdf' % (args.input, f)
            hdf = None
            # Skip CSVs whose preprocessed data is already cached for the
            # current mode ('polar' key for training, 'raw' for calibrating).
            if os.path.exists(hdf_path):
                hdf = pandas.HDFStore(hdf_path, mode='r')
                if args.train and ('polar' in hdf):
                    hdf.close()
                    continue
                elif (not args.train) and ('raw' in hdf):
                    hdf.close()
                    continue

                hdf.close()

            print('convert %s to hdf5 ...' % f)            
            input = pandas.read_csv(path, header=None,
                names=['timestamp', 'temperature', 'hangle', 'vangle', 'dist', 'intensity'],
                dtype={'timestamp': np.float64, 'temperature': np.int32, 'hangle': np.float32, 'vangle': np.float32, 'dist': np.float32
                , 'intensity': np.float32})        

            if args.train:
                # Training only needs the downsampled polar grid.
                polar = preprocess(input, args)                
                polar.to_hdf(hdf_path, 'polar', append=True)
            else:
                # Calibration keeps every raw point (keep_all=True).
                input = preprocess(input, args, keep_all=True)            
                input.to_hdf(hdf_path, 'raw', append=True)

            print('preprocessed data saved to %s.' % hdf_path)

        files = [f for f in os.listdir(args.input) if os.path.splitext(f)[-1] == '.hdf']
    
    i = 0
    # Per-file noise statistics (RMS of plane offsets) before and after
    # calibration; only filled in calibration mode.
    measurement = pandas.DataFrame(columns=['file', 'precab-negative', 'precab-positive', 'precab-all',
     'postcab-negative', 'postcab-positive', 'postcab-all', 'improved-negative', 'improved-positive', 'improved-all'])
    for f in files:
        path = '%s/%s' % (args.input, f)               
        if args.train:
            all = pandas.read_hdf(path, 'polar') 
            # if args.method.startswith('pycaret') and (i % 10) != 0:
            #     # break
            #     i = i + 1
            #     continue #skip data of odd number
            # A negative bias requests the interactive alignment tool.
            if args.bias < 0:
                angle_bias = calibrator.compute_align_angle(all)
                print('best align angle for %s is %.1f' % (f, angle_bias))            
            calibrator.train(all)
        else:
            all = pandas.read_hdf(path, 'raw') 
            print('calibrating %s points in %s ...' % (all.shape[0], f))
            # Rotate the negative hemisphere by the learned align angle so
            # both hemispheres share one horizontal frame.
            negative = all['vangle'] < 0
            all.loc[negative, 'hangle'] = (all[negative]['hangle'] + calibrator.align_angle) % 360
            # NOTE(review): the precab_*/postcab_* variables below are only
            # bound when args.saveoffset is set; the measurement block later
            # reads them unconditionally (saveoffset currently defaults True).
            if args.saveoffset:
                output_path = '%s/%s-negative.png' % (precab_output, f)
                negative_cloud = all[all['vangle'] < 0]                
                positive_cloud = all[all['vangle'] >= 0]                
                _, precab_negative_std = polar_filter(negative_cloud[['hangle', 'vangle', 'dist', 'offset']], args.angle_resolution, fieldname='offset', save_to=output_path, filter2=denoise_float32)                
                negative_cloud.loc[:, 'vangle'] = -negative_cloud['vangle']  
                output_path = '%s/%s-positive.png' % (precab_output, f)
                _, precab_positive_std = polar_filter(positive_cloud[['hangle', 'vangle', 'dist', 'offset']], args.angle_resolution, fieldname='offset', save_to=output_path, filter2=denoise_float32)                

                output_path = '%s/%s-all.png' % (precab_output, f)
                all_cloud = negative_cloud.append(positive_cloud)
                _, precab_all_std = polar_filter(all_cloud[['hangle', 'vangle', 'dist', 'offset']], args.angle_resolution, fieldname='offset', save_to=output_path, filter2=denoise_float32)
                negative_cloud.loc[:, 'vangle'] = -negative_cloud['vangle']

            # Apply the learned per-point range correction.
            all['dist'] = calibrator.calibrate(all)

            if args.saveoffset:
                # Re-run plane fitting on the corrected cloud to measure the
                # residual offsets after calibration.
                all_cloud =  preprocess(all, args)
                output_path = '%s/%s-negative.png' % (postcab_output, f)
                negative_cloud = all_cloud[all_cloud['vangle'] < 0]
                # negative_cloud.loc[:, 'hangle'] = (negative_cloud['hangle'] + calibrator.align_angle) % 360
                positive_cloud = all_cloud[all_cloud['vangle'] >= 0]
                negative_cloud.loc[:, 'vangle'] = -negative_cloud['vangle']                
                _, postcab_negative_std = polar_filter(negative_cloud[['hangle', 'vangle', 'dist', 'offset']], args.angle_resolution, fieldname='offset', save_to=output_path, filter2=denoise_float32)
                # negative_cloud.loc[:, 'vangle'] = -negative_cloud['vangle'] 

                output_path = '%s/%s-positive.png' % (postcab_output, f)                
                _, postcab_positive_std = polar_filter(positive_cloud[['hangle', 'vangle', 'dist', 'offset']], args.angle_resolution, fieldname='offset', save_to=output_path, filter2=denoise_float32)            

                output_path = '%s/%s-all.png' % (postcab_output, f)
                all_cloud = negative_cloud.append(positive_cloud)
                _, postcab_all_std = polar_filter(all_cloud[['hangle', 'vangle', 'dist', 'offset']], args.angle_resolution, fieldname='offset', save_to=output_path, filter2=denoise_float32)

            output_path = '%s/%s' % (args.output, f)
            # negative_cloud.loc[:, 'hangle'] = (negative_cloud['hangle'] - 180) % 360            
            # NOTE(review): the hemisphere was rotated by calibrator.align_angle
            # above but is rotated back by a hard-coded 180 here — confirm this
            # asymmetry is intentional.
            all.loc[negative, 'hangle'] = (all[negative]['hangle'] - 180) % 360
            all[['hangle', 'vangle', 'dist', 'intensity']].to_hdf(output_path, 'raw')            
            print('%s points in %s calibrated' % (all.shape[0], f))
            # NOTE(review): DataFrame.append was removed in pandas 2.0; this
            # requires pandas < 2 (or a switch to pandas.concat).
            measurement = measurement.append({
                'file': f,
                'precab-negative': precab_negative_std,
                'precab-positive': precab_positive_std,
                'precab-all': precab_all_std,
                'postcab-negative': postcab_negative_std,
                'postcab-positive': postcab_positive_std,
                'postcab-all': postcab_all_std,
                'improved-negative': precab_negative_std - postcab_negative_std,
                'improved-positive': precab_positive_std - postcab_positive_std,
                'improved-all': precab_all_std - postcab_all_std
            }, ignore_index=True)
            print('precab negative std: %f, postcab negative std: %f, %f improved.'
             % (precab_negative_std, postcab_negative_std, precab_negative_std - postcab_negative_std))
            print('precab positive std: %f, postcab positive std: %f, %f improved'
             % (precab_positive_std, postcab_positive_std, precab_positive_std - postcab_positive_std))
            print('precab all std: %f, postcab all std: %f, %f improved'
             % (precab_all_std, postcab_all_std, precab_all_std - postcab_all_std))

        i = i + 1
        print('%s file(s) done, %s files(s) remained, %.1f%% completed.' % (i, len(files) - i, 100 * i / len(files)))
        # break        
    
    if args.train:
        calibrator.build()                
        calibrator.dump(args.calibration)
        # calibrator.show_model()
    else:
        print(measurement)
        mpath = '%s/measurement-%.f-%.3f-%.f.csv' % (args.output, calibrator.angle_resolution, calibrator.cos_in_resolution, calibrator.inv_dist_resolution)
        measurement.to_csv(mpath)
        print('output to %s done.' % args.output)
