from dataset import VaeTestDataset
from torch.utils.data import DataLoader
import torch
import numpy as np
import cv2
from local_mapping import LocalMap
from dataset import VaeTestDataset
import os
import os.path as osp
from tqdm import tqdm, trange
from concurrent.futures import ProcessPoolExecutor



# Occupancy grid map parameters (shared by both LocalMap instances below).
P_prior = 0.5	# Prior occupancy probability of an unobserved cell
P_occ = 0.7	    # Probability that cell is occupied with total confidence
P_free = 0.3	# Probability that cell is free with total confidence
MAP_X_LIMIT = [0, 6.4]      # Map limits on the x-axis in [m]
MAP_Y_LIMIT = [-3.2, 3.2]   # Map limits on the y-axis in [m]
RESOLUTION = 0.1        # Grid resolution in [m]
TRESHOLD_P_OCC = 0.8    # Occupancy threshold (identifier keeps original spelling)


# TODO: generate input motion-compensated videos and plain masked future videos
class Converter(object):
    """Convert raw lidar scan sequences (.npy files) into occupancy-grid-map
    image datasets (.png files).

    Two output flavors are produced:
      * ``convert``          -- plain per-scan maps, no motion compensation
      * ``convert_nonplain`` -- paired input/output sequences, where the input
                                maps are motion compensated into a common
                                predicted robot frame and the output maps are
                                the uncompensated future frames
    """

    def __init__(self,
                 dset_path,
                 device,
                 dest_path='',
                 plain=True,
                 seq_len=10):
        """
        :param dset_path: root directory of the raw dataset
        :param device: torch device used for map construction
        :param dest_path: destination override (currently unused; kept for
                          backward compatibility)
        :param plain: if True, build single-frame maps (seq_len forced to 1)
        :param seq_len: sequence length for motion-compensated conversion
        """
        self.dset_path = dset_path
        self.device = device
        self.dest_path = dest_path
        self.plain = plain
        # self.train_flags = ['train', 'val', 'test']
        self.train_flags = ['test']
        self.batch_size = 1

        # Plain conversion rasterizes one scan at a time.
        self.seq_len = 1 if self.plain else seq_len
        # Grid map used to rasterize the uncompensated (target/mask) frames.
        self.mask_gridMap = LocalMap(X_lim=MAP_X_LIMIT,
                                     Y_lim=MAP_Y_LIMIT,
                                     resolution=RESOLUTION,
                                     p=P_prior,
                                     size=[self.batch_size, self.seq_len],
                                     device=self.device)
        # Grid map used to rasterize the motion-compensated input frames.
        self.input_gridMap = LocalMap(X_lim=MAP_X_LIMIT,
                                      Y_lim=MAP_Y_LIMIT,
                                      resolution=RESOLUTION,
                                      p=P_prior,
                                      size=[self.batch_size, self.seq_len],
                                      device=self.device)

    def convert(self, subset_name=None):
        '''
        Convert raw scans into plain (non-motion-compensated) map images.

        :param subset_name: a single subset directory name to convert; if
                            None, every subdirectory of ``dset_path`` is
                            converted
        :return: None (writes .png files under
                 ``<dset>-no-motion-compensation/<subset>/<flag>/``)
        '''
        dset_name = self.dset_path.split('/')[-1] + '-no-motion-compensation'
        os.makedirs(dset_name, exist_ok=True)

        def convert_one_subset(subset_name):
            subset_path = osp.join(dset_name, subset_name)
            subset_abs_path = osp.join(self.dset_path, subset_name)
            os.makedirs(subset_path, exist_ok=True)
            is_test = False
            tick = None
            for flag in self.train_flags:
                tick = 0
                if is_test:
                    # The subset had no split directories: everything already
                    # went into "test" on the first iteration.
                    break
                if not osp.exists(osp.join(subset_abs_path, flag)):
                    # Subset without split directories: treat it all as test.
                    save_path = osp.join(subset_path, "test")
                    os.makedirs(save_path, exist_ok=True)
                    file_names = self.read_files(subset_abs_path, "test")
                    is_test = True
                else:
                    save_path = osp.join(subset_path, flag)
                    os.makedirs(save_path, exist_ok=True)
                    file_names = self.read_files(osp.join(subset_abs_path, flag), flag)
                num_files = len(file_names['scans'])

                for i in trange(num_files):
                    scan = torch.from_numpy(np.load(file_names['scans'][i]))
                    # Robot assumed static at the origin (no compensation).
                    x_odom = torch.zeros(self.batch_size, 1).to(self.device)
                    y_odom = torch.zeros(self.batch_size, 1).to(self.device)
                    theta_odom = torch.zeros(self.batch_size, 1).to(self.device)
                    # Lidar measurements shaped (batch, seq, beams).
                    distances = scan.view(1, 1, scan.shape[0])
                    # Beam angles: 270-degree field of view, centered forward.
                    angles = torch.linspace(-(135 * np.pi / 180), 135 * np.pi / 180, distances.shape[-1]).to(
                        self.device)
                    distance_x, distance_y = self.mask_gridMap.lidar_scan_xy(distances, angles, x_odom, y_odom,
                                                                             theta_odom)
                    maps = self.mask_gridMap.discretize(distance_x, distance_y)
                    # Binary map -> 8-bit grayscale image.
                    maps_img = maps[0].to("cpu").permute(1, 2, 0).numpy().astype('uint8') * 255
                    img_name = str(tick) + '.png'
                    img_path = osp.join(save_path, img_name)
                    cv2.imwrite(img_path, maps_img)
                    tick += 1

        if subset_name is None:
            subsets = os.listdir(self.dset_path)
            for dset in subsets:
                # BUGFIX: test the entry inside dset_path, not relative to the
                # current working directory (which silently skipped subsets).
                if osp.isdir(osp.join(self.dset_path, dset)):
                    convert_one_subset(dset)
        else:
            convert_one_subset(subset_name)

    def convert_nonplain(self, subset_name=None):
        '''
        Convert raw scans into paired sequences: motion-compensated input
        maps ("in") and uncompensated future target maps ("out").

        :param subset_name: a single subset directory name to convert; if
                            None, every subdirectory of ``dset_path`` is
                            converted
        :return: None (writes .png files under
                 ``<dset>-motion-compensation/<subset>/<flag>/{in,out}/``)
        '''
        dset_name = self.dset_path.split('/')[-1] + "-motion-compensation"
        os.makedirs(dset_name, exist_ok=True)

        def convert_one_subset(subset_name):
            subset_path = osp.join(dset_name, subset_name)
            subset_abs_path = osp.join(self.dset_path, subset_name)
            os.makedirs(subset_path, exist_ok=True)
            is_test = False
            in_tick = None
            out_tick = None
            for flag in self.train_flags:
                in_tick = 0
                out_tick = 0
                if is_test:
                    break
                if not osp.exists(osp.join(subset_abs_path, flag)):
                    in_path = osp.join(subset_path, "test", "in")
                    out_path = osp.join(subset_path, "test", "out")
                    os.makedirs(in_path, exist_ok=True)
                    os.makedirs(out_path, exist_ok=True)
                    file_names = self.read_files(subset_abs_path, "test")
                    # BUGFIX: the test-only fallback flag belongs here (it had
                    # drifted into the else branch, which aborted the flag loop
                    # after the first split even when train/val dirs existed).
                    is_test = True
                else:
                    # NOTE: input and output are different kinds of maps — the
                    # INPUT maps are motion compensated, the OUTPUT maps are
                    # generated in their own frame at that time.
                    in_path = osp.join(subset_path, flag, "in")
                    out_path = osp.join(subset_path, flag, "out")
                    os.makedirs(in_path, exist_ok=True)
                    os.makedirs(out_path, exist_ok=True)
                    file_names = self.read_files(osp.join(subset_abs_path, flag), flag)
                num_files = len(file_names["scans"])

                # Mapping
                for seq_cnt in trange(num_files - self.seq_len):
                    scans = []
                    velocities = []
                    positions = []
                    predicted_scans = []
                    # seq_len consecutive (scan, velocity, position) triples
                    # used to build the motion-compensated input maps.
                    for time_step in range(self.seq_len):
                        scan = torch.from_numpy(np.load(file_names["scans"][seq_cnt + time_step]))
                        vel = torch.from_numpy(np.load(file_names["velocities"][seq_cnt + time_step]))
                        posi = torch.from_numpy(np.load(file_names["positions"][seq_cnt + time_step]))
                        scans.append(scan)
                        velocities.append(vel)
                        positions.append(posi)
                    scans = torch.stack(scans, dim=0).unsqueeze(0)
                    velocities = torch.stack(velocities, dim=0).unsqueeze(0)
                    positions = torch.stack(positions, dim=0).unsqueeze(0)
                    # The next seq_len frames are the prediction targets; stop
                    # when they would run past the end of the file list.
                    # BUGFIX: was hard-coded "+ 10", only correct for
                    # seq_len == 10.
                    if seq_cnt + 2 * self.seq_len > len(file_names["scans"]):
                        print("OVERFLOW break")
                        break
                    for time_step in range(self.seq_len):
                        predicted_scan = torch.from_numpy(
                            np.load(file_names["scans"][seq_cnt + self.seq_len + time_step]))
                        predicted_scans.append(predicted_scan)
                    # The in/out sequences are aligned for the prediction task.
                    predicted_scans = torch.stack(predicted_scans, dim=0).unsqueeze(0)

                    # Predict the robot pose one step ahead and express all
                    # past poses in that frame (motion compensation).
                    obs_pos_N = positions[:, -1]
                    vel_N = velocities[:, -1]
                    T = 1  # prediction horizon, constant here
                    noise_std = [0] * 3
                    pos_origin = self.input_gridMap.origin_pose_prediction(vel_N, obs_pos_N, T, noise_std)

                    # Poses of the robot over the past seq_len frames.
                    pos = positions
                    x_odom, y_odom, theta_odom = self.input_gridMap.robot_coordinate_transform(pos, pos_origin)
                    distances = scans
                    # Beam angles: 270-degree field of view, centered forward.
                    # CONSISTENCY: moved to self.device as in convert().
                    angles = torch.linspace(-(135 * np.pi / 180), 135 * np.pi / 180,
                                            distances.size(-1)).to(self.device)
                    distances_x, distances_y = self.input_gridMap.lidar_scan_xy(distances, angles,
                                                                               x_odom, y_odom, theta_odom)
                    input_binary_maps = self.input_gridMap.discretize(distances_x, distances_y)

                    # The output (target) maps are built without compensation.
                    x_odom = torch.zeros(self.batch_size, self.seq_len).to(self.device)
                    y_odom = torch.zeros(self.batch_size, self.seq_len).to(self.device)
                    theta_odom = torch.zeros(self.batch_size, self.seq_len).to(self.device)
                    distances = predicted_scans
                    distance_x, distance_y = self.mask_gridMap.lidar_scan_xy(distances, angles, x_odom, y_odom,
                                                                             theta_odom)
                    output_binary_maps = self.mask_gridMap.discretize(distance_x, distance_y)

                    # Binary maps -> one 8-bit grayscale .png per frame.
                    input_map_imgs = input_binary_maps[0].to("cpu").permute(1, 2, 0).numpy().astype("uint8") * 255
                    for idx in range(input_map_imgs.shape[-1]):
                        input_img_path = osp.join(in_path, str(in_tick) + '.png')
                        cv2.imwrite(input_img_path, input_map_imgs[:, :, idx])
                        in_tick += 1

                    output_map_imgs = output_binary_maps[0].to("cpu").permute(1, 2, 0).numpy().astype("uint8") * 255
                    for idx in range(output_map_imgs.shape[-1]):
                        output_img_path = osp.join(out_path, str(out_tick) + '.png')
                        cv2.imwrite(output_img_path, output_map_imgs[:, :, idx])
                        out_tick += 1

        if subset_name is None:
            subsets = os.listdir(self.dset_path)
            for dset in subsets:
                # BUGFIX: test the entry inside dset_path, not relative to the
                # current working directory (which silently skipped subsets).
                if osp.isdir(osp.join(self.dset_path, dset)):
                    convert_one_subset(dset)
        else:
            convert_one_subset(subset_name)

    @staticmethod
    def read_files(path, train_flag):
        '''
        Collect the .npy file paths listed in the per-sensor index files.

        :param path: directory containing ``scans``, ``positions`` and
                     ``velocities`` subdirectories, each holding a
                     ``<train_flag>.txt`` index file
        :param train_flag: split name, e.g. 'train', 'val' or 'test'
        :return: dict mapping sensor name -> list of .npy file paths
        '''
        def read(f, sensor):
            # Keep only index lines that actually name a .npy file.
            file_names = []
            with open(f, 'r') as fcon:
                for line in fcon.read().split('\n'):
                    if '.npy' in line:
                        file_names.append(osp.join(path, sensor, line))
            return file_names

        scans_file_entries = osp.join(path, 'scans', train_flag + '.txt')
        positions_file_entries = osp.join(path, 'positions', train_flag + '.txt')
        velocities_file_entries = osp.join(path, 'velocities', train_flag + '.txt')
        files_names = {
            "scans": read(scans_file_entries, 'scans'),
            "positions": read(positions_file_entries, 'positions'),
            "velocities": read(velocities_file_entries, 'velocities'),
        }

        return files_names


if __name__ == "__main__":
    # Root folder holding all OGM sub-datasets; maps are built on the CPU.
    base_path = "/data/OGM-datasets"
    cpu_device = torch.device("cpu")
    ogm_converter = Converter(dset_path=base_path,
                              device=cpu_device,
                              plain=False)

    subset_names = [
        "OGM-Turtlebot2",
        "OGM-Jackal/test_library2pond",
        "OGM-Jackal/test_pond2library",
        "OGM-Spot/test_union1",
        "OGM-Spot/test_union2",
    ]

    # Only the first subset is converted here; widen the slice for more.
    for name in subset_names[:1]:
        ogm_converter.convert_nonplain(name)