import torch

# TODO: Bresenham algorithm to search the grid indices covered by an arbitrary line segment
from bresenham import bresenham_torch

class LocalMap:
    """
    PyTorch version of a batched local occupancy grid map.

    The map is stored as a log-odds tensor of shape
    [batch, seq_len, width, height] and is initialised from the prior
    occupancy probability (typically 0.5, i.e. log-odds 0).
    """
    def __init__(self, x_lim, y_lim, resolution, p_prior, size=(1, 1), device=None):
        """
        Build the grid covering ``x_lim`` x ``y_lim`` (local frame) at the
        given resolution.

        Args:
            x_lim: [min, max] extent along x in the local frame.
            y_lim: [min, max] extent along y in the local frame.
            resolution: edge length of one grid cell.
            p_prior: prior occupancy probability used to fill the map.
            size: leading (batch, seq_len) dimensions of the map tensor
                  (default changed from a mutable list to a tuple; callers
                  may still pass a list).
            device: optional torch device for internally created tensors.
        """
        self.x_lim = x_lim
        self.y_lim = y_lim
        self.size = size
        self.resolution = resolution
        self.p_prior = p_prior
        # The original misspelled attribute is kept as an alias so any
        # external reader of ``p_priro`` does not break.
        self.p_priro = p_prior
        self.device = device

        x = torch.arange(x_lim[0], x_lim[1], resolution)
        y = torch.arange(y_lim[0], y_lim[1], resolution)

        self.x_max = len(x)
        self.y_max = len(y)

        # log_odds returns a 0-dim tensor; .item() hands torch.full the
        # plain Python scalar its fill_value expects.
        self.occ_map = torch.full((*size, len(x), len(y)),
                                  fill_value=self.log_odds(p_prior).item())
        # ``is not None`` rather than truthiness: a device index of 0 is
        # falsy and the original ``if self.device:`` silently skipped it.
        if self.device is not None:
            self.occ_map = self.occ_map.to(device)

    @staticmethod
    def log_odds(p):
        """
        Log-odds ratio of a probability:

                      p
        l(p) = log -------
                    1 - p

        ``p`` may be a Python number or a tensor; it is promoted with
        ``torch.as_tensor`` because ``torch.log`` rejects plain floats
        (the original crashed when called with a float such as 0.5).
        For numerical stability, p == 1 is rejected.
        """
        p = torch.as_tensor(p, dtype=torch.float32)
        assert bool((p != 1).all()), "invalid p for log odds"
        return torch.log(p / (1 - p))

    @staticmethod
    def retrieve_p(log_map):
        """
        Recover p(x) from the log-odds ratio l(x):

                        1
        p(x) = 1 - ---------------
                    1 + exp(l(x))

        which is exactly the logistic sigmoid, so ``torch.sigmoid`` is
        used (same value, one fused numerically-stable op).
        """
        return torch.sigmoid(log_map)

    @staticmethod
    def scan2inertial(ranges, angles, x_odom, y_odom, theta_odom):
        """
        Convert lidar ranges measured in the odom frame into inertial-frame
        Cartesian coordinates.

        ``ranges`` is [batch, seq_len, num_points]; ``angles`` is the fixed
        scanner angle vector and is broadcast over the batch, while the
        odom pose tensors ([batch, seq_len]) gain a trailing point
        dimension so everything is computed in one batched expression.

        Returns:
            (x, y): inertial-frame coordinates, same shape as ``ranges``.
        """
        target_shape = ranges.size()
        angles = angles.expand(*target_shape)
        x_odom = x_odom.unsqueeze(2).expand(*target_shape)
        y_odom = y_odom.unsqueeze(2).expand(*target_shape)
        theta_odom = theta_odom.unsqueeze(2).expand(*target_shape)

        heading = angles + theta_odom
        x = x_odom + ranges * torch.cos(heading)
        y = y_odom + ranges * torch.sin(heading)

        return x, y

    def is_in_perceptual_field(self, x, y):
        """
        Element-wise mask of cell indices that fall inside the grid.

        NOTE(review): the lower bound is strict (``> 0``), so row/column 0
        is treated as out of field — looks like an off-by-one; kept for
        behavioural compatibility, confirm before changing to ``>= 0``.
        """
        return (x < self.x_max) & (y < self.y_max) & (x > 0) & (y > 0)

    def discretize(self, x, y):
        """
        Discretize continuous local-frame coordinates into a binary hit map.

        Args:
            x, y: [batch, seq_len, num_points] continuous coordinates.

        Returns:
            A [batch, seq_len, x_max, y_max] tensor with 1 at every cell
            hit by an in-field point and 0 elsewhere.
        """
        x = torch.floor((x - self.x_lim[0]) / self.resolution).to(torch.int32)
        y = torch.floor((y - self.y_lim[0]) / self.resolution).to(torch.int32)
        flag_valid = self.is_in_perceptual_field(x, y)
        # idx_valid columns are (batch, seq, point) indices of in-field hits.
        idx_valid = torch.nonzero(flag_valid)
        x = x[idx_valid[:, 0], idx_valid[:, 1], idx_valid[:, 2]]
        y = y[idx_valid[:, 0], idx_valid[:, 1], idx_valid[:, 2]]

        binary_map = torch.zeros((self.size[0], self.size[1], self.x_max, self.y_max))
        if self.device is not None:
            binary_map = binary_map.to(self.device)
        binary_map[idx_valid[:, 0], idx_valid[:, 1], x, y] = 1

        return binary_map

    def update(self, x0, y0, x, y, p_free, p_occ):
        """
        Update the log-odds map with one batch of scans via the inverse
        sensor model.

        Args:
            x0, y0: [batch, seq_len] sensor origin in the local frame.
            x, y: [batch, seq_len, num_points] scan endpoints (local frame).
            p_free: probability assigned to traversed (free) cells.
            p_occ: probability assigned to endpoint (occupied) cells.
        """
        # 1 marks an occupied endpoint cell.
        binary_map = self.discretize(x, y)

        # Ray-trace from the sensor origin to every endpoint; traversed
        # cells are marked -1 (free) in a scratch copy of the hit map.
        cell_state = binary_map.clone().detach()
        for i in range(self.size[0]):
            for j in range(self.size[1]):
                end = torch.nonzero(binary_map[i, j])
                if end.size(0) != 0:  # this frame observed at least one hit
                    # Grid cell of the sensor origin (fixed: the original
                    # read the non-existent attributes X_lim / Y_lim).
                    x0_r = torch.floor((x0[i, j] - self.x_lim[0]) / self.resolution).to(int)
                    y0_c = torch.floor((y0[i, j] - self.y_lim[0]) / self.resolution).to(int)
                    start = torch.tensor([x0_r, y0_c]).to(self.device)
                    start = start.unsqueeze(1).expand(end.size(1), end.size(0)).permute(1, 0)
                    # Free cells along each ray from the Bresenham tracer.
                    points = bresenham_torch(end, start, max_iter=-1)
                    x_r = points[:, 0]
                    y_c = points[:, 1]
                    # Fixed: original called the undefined ``self.is_valid``.
                    flag_v = self.is_in_perceptual_field(x_r, y_c)
                    idx_v = torch.nonzero(flag_v)
                    x_rv = x_r[idx_v[:, 0]]
                    y_cv = y_c[idx_v[:, 0]]
                    cell_state[i, j, x_rv, y_cv] = -1

        # Inverse sensor model: accumulate the log-odds increments.
        self.occ_map[cell_state == -1] += self.log_odds(p_free)
        self.occ_map[cell_state == 1] += self.log_odds(p_occ)

    def calc_MLE(self, prob_map, threshold_p_occ):
        """
        Threshold the probability map into a binary maximum-likelihood map
        (in place): cells with p >= threshold become 1, the rest 0.
        """
        prob_map[prob_map >= threshold_p_occ] = 1
        prob_map[prob_map < threshold_p_occ] = 0

        return prob_map

    def to_prob_occ_map(self, threshold_p_occ):
        """
        Collapse the per-timestep log-odds maps into one binary occupancy
        map (GRAYSCALE-image style, values in {0, 1}).
        """
        log_map = torch.sum(self.occ_map, dim=1)  # sum of all timestep maps
        prob_map = self.retrieve_p(log_map)
        prob_map = self.calc_MLE(prob_map, threshold_p_occ)

        return prob_map

    def origin_pose_prediction(self, vel_N, obs_pos_N, T,
                               time_step=0.1, noise_std=(0, 0, 0)):
        """
        Predict the pose T steps ahead with a constant-velocity model plus
        additive white Gaussian noise:

            d      = v * time_step * T
            dtheta = omega * time_step * T
            x'     = x + d * cos(theta) + n_x
            y'     = y + d * sin(theta) + n_y
            theta' = theta + dtheta + n_theta

        Args:
            vel_N: [batch, 2] (v, omega) velocities.
            obs_pos_N: [batch, 3] latest observed (x, y, theta).
            T: number of time steps to roll forward.
            time_step: duration of one step.
            noise_std: std-dev of the (x, y, theta) noise terms
                       (default changed from a mutable list to a tuple).

        Returns:
            [batch, 3] predicted (x, y, theta).
        """
        pos_origin_pre = torch.zeros(self.size[0], 3)
        # One AWGN sample per batch element for each pose component.
        x_noise = torch.randn(self.size[0]) * noise_std[0]
        y_noise = torch.randn(self.size[0]) * noise_std[1]
        theta_noise = torch.randn(self.size[0]) * noise_std[2]
        if self.device is not None:
            pos_origin_pre = pos_origin_pre.to(self.device)
            x_noise = x_noise.to(self.device)
            y_noise = y_noise.to(self.device)
            theta_noise = theta_noise.to(self.device)
        d = vel_N[:, 0] * time_step * T
        theta = vel_N[:, 1] * time_step * T
        pos_origin_pre[:, 0] = obs_pos_N[:, 0] + \
            d * torch.cos(obs_pos_N[:, 2]) + x_noise
        # Fixed: the original added x_noise to all three components.
        pos_origin_pre[:, 1] = obs_pos_N[:, 1] + \
            d * torch.sin(obs_pos_N[:, 2]) + y_noise
        pos_origin_pre[:, 2] = obs_pos_N[:, 2] + \
            theta + theta_noise

        return pos_origin_pre

    def robot_coordinate_transform(self, pos, pos_origin_pre):
        """
        Express past robot poses in the predicted origin's reference frame.

        Args:
            pos: [batch, seq_len, 3] past poses in the inertial frame.
            pos_origin_pre: [batch, 3] predicted origin pose.

        Returns:
            (x_odom, y_odom, theta_odom), each [batch, seq_len]: the poses
            translated and rotated into the predicted frame.
        """
        # Broadcast the single predicted pose over the sequence dimension.
        pos_origin_pre = pos_origin_pre.unsqueeze(1).expand(*pos.size())
        dx = pos[:, :, 0] - pos_origin_pre[:, :, 0]
        dy = pos[:, :, 1] - pos_origin_pre[:, :, 1]
        theta = pos_origin_pre[:, :, 2]
        # Rotation by -theta maps inertial offsets into the predicted frame
        # (-sin(theta) == sin(-theta), identical to the original).
        x_odom = torch.cos(theta) * dx + torch.sin(theta) * dy
        y_odom = -torch.sin(theta) * dx + torch.cos(theta) * dy
        theta_odom = pos[:, :, 2] - theta

        return x_odom, y_odom, theta_odom