import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
import os
import imageio as iio
from typing import List, Union, Dict
from local_occ_grid_map import LocalMap


def get_instance_path(path: str) -> List[str]:
    """Return the sorted full paths of all entries directly under *path*."""
    return sorted(os.path.join(path, name) for name in os.listdir(path))

def load_from_npy(files: List[str]):
    """Load every .npy file in *files* (in sorted path order) and stack the
    arrays along a new leading axis."""
    arrays = [np.load(path) for path in sorted(files)]
    return np.stack(arrays, axis=0)

def normalize_feature(feature: np.ndarray) -> np.ndarray:
    """Standardize *feature* column-wise: subtract the per-column mean and
    divide by the per-column standard deviation (computed along axis 0).

    Args:
        feature: 2-D array of shape (n_samples, n_features).

    Returns:
        Array of the same shape with zero mean and unit std per column.

    Raises:
        ValueError: if any column has zero variance (division would be
            undefined).
    """
    feature_offset = np.mean(feature, axis=0)
    # Mean of squared deviations is the *variance*; take the square root to
    # get the standard deviation (the original divided by the variance,
    # which does not produce unit-std output).
    std = np.sqrt(np.mean(np.square(feature - feature_offset), axis=0))
    # Raise instead of assert: asserts are stripped under `python -O`.
    if not np.all(std > 0):
        raise ValueError("normalize_feature: at least one zero-variance column")
    return (feature - feature_offset) / std

class OGMNavigationDataset(Dataset):
    """Sliding-window dataset of local occupancy grid maps (OGMs) paired
    with robot positions, velocities and lidar scans.

    Each item is a window of ``hist_seq_length + pred_seq_length``
    consecutive frames split into a history part and a prediction part.

    Args:
        map_base_path: root directory of OGM images; maps for a split are
            read from ``map_base_path/<split>``.
        other_path: root directory containing ``<split>/scans``,
            ``<split>/positions`` and ``<split>/velocities``, each with a
            ``<split>.txt`` meta file listing its ``.npy`` files.
        tag: ``'local'`` or ``'motion_compensated'`` — selects the sample
            layout returned by ``__getitem__``.
        hist_seq_length: number of history frames per sample.
        pred_seq_length: number of future frames per sample.
        device: accepted for API compatibility; tensors are built on CPU.
        split: dataset split name (``'train'``, ``'val'``, ...).
    """

    def __init__(self,
                 map_base_path: str,
                 other_path: str,
                 tag: str,
                 hist_seq_length: int,
                 pred_seq_length: int,
                 device: Union[torch.device, str] = 'cpu',
                 split: str = 'train') -> None:
        self.map_base_path = os.path.join(map_base_path, split)
        self.other_path = other_path
        self.tag = tag
        self.split = split
        self.hist_seq_length = hist_seq_length
        self.pred_seq_length = pred_seq_length
        self.seq_len = hist_seq_length + pred_seq_length
        self.map_files = get_instance_path(self.map_base_path)
        self.device = device if isinstance(device, torch.device) \
                      else torch.device(device)

        # The three modalities share an identical meta-file layout, so one
        # helper parses all of them (replaces three copy-pasted loops).
        self.scan_files = self._collect_files('scans')
        self.pos_files = self._collect_files('positions')
        self.vel_files = self._collect_files('velocities')
        self._check_consistency()
        self.pos = load_from_npy(self.pos_files)
        self.vel = load_from_npy(self.vel_files)
        # Normalize once up front; the original recomputed this on every
        # __getitem__ call even though self.vel never changes.
        self.vel_normalized = normalize_feature(self.vel)
        # Number of complete windows that fit in the frame sequence.
        self.length = len(self.map_files) - self.seq_len + 1

        # const: spatial resolution of every OGM image.
        self.map_shape = (64, 64)

    def _collect_files(self, subdir: str) -> List[str]:
        """Read ``<split>.txt`` under *subdir* and return the sorted full
        paths of the ``.npy`` files it lists."""
        base = os.path.join(self.other_path, self.split, subdir)
        meta_file = os.path.join(base, self.split + '.txt')
        files = []
        with open(meta_file, 'r') as f:
            for line in f.read().split('\n'):
                if '.npy' in line:
                    files.append(os.path.join(base, line))
        return sorted(files)

    def __len__(self) -> int:
        return self.length

    def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:
        """Dispatch to the sample builder selected by ``self.tag``."""
        tag = self.tag.lower()
        if tag == 'local':
            return self.get_local_data(idx)
        if tag == 'motion_compensated':
            return self.get_motion_compensation_data(idx)
        raise NotImplementedError(f"tag {self.tag} not implemented")

    def get_local_data(self, idx: int) -> Dict[str, torch.Tensor]:
        """Build one sample of maps, velocities and positions.

        History covers frames ``[idx, idx + hist_seq_length)``; prediction
        covers ``[idx + hist_seq_length, idx + seq_len)``.  Map images are
        loaded from disk and scaled to [0, 1]; a channel axis of size 1 is
        inserted so maps have shape (T, 1, 64, 64).
        """
        start = idx
        mid = idx + self.hist_seq_length
        end = idx + self.seq_len
        maps = np.zeros((self.seq_len, *self.map_shape))
        for i in range(start, end):
            maps[i - idx] = iio.imread(self.map_files[i]).astype(np.float32) / 255.
        maps_hist = torch.from_numpy(maps[:self.hist_seq_length]).unsqueeze(1)
        maps_pred = torch.from_numpy(maps[self.hist_seq_length:self.seq_len]).unsqueeze(1)
        return {
            'maps_hist': maps_hist,
            'maps_pred': maps_pred,
            'vels_hist': torch.from_numpy(self.vel[start:mid]),
            'vels_pred': torch.from_numpy(self.vel[mid:end]),
            'vels_hist_normalized': torch.from_numpy(self.vel_normalized[start:mid]),
            'vels_pred_normalized': torch.from_numpy(self.vel_normalized[mid:end]),
            'pos_hist': torch.from_numpy(self.pos[start:mid]),
            'pos_pred': torch.from_numpy(self.pos[mid:end]),
        }

    @torch.no_grad()
    def get_motion_compensation_data(self, idx: int) -> Dict[str, torch.Tensor]:
        """Build a sample whose conditioning maps are the history lidar
        scans re-projected into the robot pose predicted T steps ahead,
        for T = 1 .. pred_seq_length."""
        start = idx
        mid = idx + self.hist_seq_length
        scans_hist = np.stack(
            [np.load(self.scan_files[i]) for i in range(start, mid)], axis=0)
        data = self.get_local_data(idx)
        mask_gridMap = LocalMap(X_lim=[0, 6.4],
                                Y_lim=[-3.2, 3.2],
                                resolution=0.1,
                                p=0.5,
                                size=[1, self.pred_seq_length],
                                device=torch.device('cpu'))
        pos_hist = data['pos_hist']
        # Last observed pose/velocity, with a leading batch axis of 1.
        obs_pos_N = pos_hist[-1][None, ...]
        vel_N = data['vels_hist'][-1][None, ...]
        # Loop-invariant: same scans and beam angles for every horizon T.
        distances = torch.from_numpy(scans_hist[None, ...])
        # NOTE(review): 270-degree field of view assumed symmetric about
        # the heading — confirm against the lidar spec.
        angles = torch.linspace(-(135 * np.pi / 180), 135 * np.pi / 180,
                                distances.shape[-1])
        cond_maps = []
        for T in range(1, self.pred_seq_length + 1):
            pos_origin = mask_gridMap.origin_pose_prediction(vel_N, obs_pos_N, T)
            pos = pos_hist[None, ...]
            x_odom, y_odom, theta_odom = \
                mask_gridMap.robot_coordinate_transform(pos, pos_origin)
            distances_x, distances_y = mask_gridMap.lidar_scan_xy(
                distances, angles, x_odom, y_odom, theta_odom)
            cond_binary_maps = mask_gridMap.discretize(distances_x, distances_y)
            cond_binary_maps.unsqueeze_(2)
            # Keep only the most recent scan's map for this horizon.
            cond_maps.append(cond_binary_maps[0][-1])
        cond_maps = torch.stack(cond_maps, dim=0)
        return {
            'maps_hist': data['maps_hist'],  # [hist_seq_length, 1, 64, 64]
            'maps_pred': data['maps_pred'],
            'vels_hist': data['vels_hist_normalized'],
            # Fixed key typo: was 'vesl_pred', inconsistent with every
            # other key in this class.
            'vels_pred': data['vels_pred_normalized'],
            'cond_maps': cond_maps,
        }

    def _check_consistency(self):
        """Verify the map/position/velocity file lists have equal lengths
        and matching frame identifiers (file-name stems).

        Raises:
            ValueError: on any count or ordering mismatch.
        """
        def stem(path: str) -> str:
            # os.path is portable; the original split on '/' (POSIX-only).
            return os.path.splitext(os.path.basename(path))[0]

        if not (len(self.map_files) == len(self.pos_files) == len(self.vel_files)):
            raise ValueError("map/position/velocity file counts differ")
        for m, p, v in zip(self.map_files, self.pos_files, self.vel_files):
            if not (stem(m) == stem(p) == stem(v)):
                raise ValueError(f"frame order mismatch: {m}, {p}, {v}")
        
        
        
if __name__ == "__main__":
    # Smoke test: build the motion-compensated dataset and print the shape
    # of every tensor in each batch.  (Dead commented-out variants removed.)
    map_base_path = "OGM-Turtlebot2-maps"
    other_path = "OGM-datasets/OGM-Turtlebot2"
    dset = OGMNavigationDataset(map_base_path=map_base_path,
                                other_path=other_path,
                                hist_seq_length=10,
                                pred_seq_length=10,
                                tag='motion_compensated')
    dloader = DataLoader(dset, batch_size=1, shuffle=False, drop_last=False)
    for batch in dloader:
        for k, v in batch.items():
            print(f'{k} shape: {v.shape}')