from dataset import VaeTestDataset
from torch.utils.data import DataLoader
import torch
import numpy as np
import cv2
import matplotlib.pyplot as plt
from local_mapping import LocalMap
from dataset import VaeTestDataset


# Occupancy-grid probability model and map geometry constants.
P_prior = 0.5	# Prior occupancy probability
P_occ = 0.7	    # Probability that cell is occupied with total confidence
P_free = 0.3	# Probability that cell is free with total confidence
MAP_X_LIMIT = [0, 6.4]      # Map limits on the x-axis [m]
MAP_Y_LIMIT = [-3.2, 3.2]   # Map limits on the y-axis [m]
RESOLUTION = 0.1        # Grid resolution in [m]
TRESHOLD_P_OCC = 0.8    # Occupancy threshold (unused here; "TRESHOLD" spelling kept — may be referenced elsewhere)

class OGMVisualizer(object):
    """Replay lidar scans from a VaeTestDataset through a LocalMap
    occupancy-grid mapper and record the resulting grid frames as a video.
    """

    def __init__(self,
                 batch_size,
                 seq_len,
                 device,
                 dset_path,
                 train_flag,
                 fps = 30):
        '''
        :param batch_size: leading batch dimension used by the mapper and loader
        :param seq_len: NOTE that the history seq_len and predict seq_len are both seq_len
        :param device: torch device the scans and mapper tensors live on
        :param dset_path: root directory passed to VaeTestDataset
        :param train_flag: dataset split selector forwarded to VaeTestDataset
        :param fps: frame rate of the recorded video
        '''
        self.batch_size = batch_size
        self.seq_len = seq_len
        self.device = device
        # Bug fix: the fps argument was previously ignored (hard-coded to 10).
        self.fps = fps
        self.dset = VaeTestDataset(dset_path, train_flag, seq_len=self.seq_len)
        # Bug fix: the loader batch size was hard-coded to 1; it must match
        # self.batch_size because mapping() allocates odometry tensors with
        # self.batch_size as their leading dimension.
        self.dloader = DataLoader(self.dset, batch_size=self.batch_size,
                                  shuffle=True, drop_last=True)

        # the mapper
        self.input_gridMap = LocalMap(X_lim=MAP_X_LIMIT,
                                      Y_lim=MAP_Y_LIMIT,
                                      resolution=RESOLUTION,
                                      p=P_prior,
                                      size=[self.batch_size, self.seq_len*2],
                                      device=self.device)
        # const here: (height, width) of each occupancy-grid frame
        self.image_shape = (64, 64)

    def mapping(self, demo=True):
        '''
        Build occupancy grids from the first batch of lidar scans.

        NOTE here only the first batch is returned for making a demo video.

        :param demo: currently unused; kept for interface compatibility
        :return: uint8 numpy array of binary grid frames scaled to {0, 255},
                 or None if the data loader yields no batches
        '''
        for batch in self.dloader:
            scans, positions, velocities = batch['scan'], batch['position'], batch['velocity']
            scans = scans.to(self.device)
            # positions / velocities are not needed for mapping in the
            # fixed-odometry demo, so they stay on the CPU.

            # robot positions: the demo assumes a static robot at the origin,
            # so all odometry tensors are zeros.
            x_odom = torch.zeros(self.batch_size, self.seq_len*2).to(self.device)
            y_odom = torch.zeros(self.batch_size, self.seq_len*2).to(self.device)
            theta_odom = torch.zeros(self.batch_size, self.seq_len*2).to(self.device)
            # Lidar measurements
            distances = scans
            # beam angles spanning a 270-degree field of view (-135 to +135 deg)
            angles = torch.linspace(-(135 * np.pi / 180), 135 * np.pi / 180, distances.shape[-1]).to(self.device)
            distance_x, distance_y = self.input_gridMap.lidar_scan_xy(distances, angles, x_odom, y_odom, theta_odom)
            maps = self.input_gridMap.discretize(distance_x, distance_y)
            # Scale binary occupancy {0, 1} to {0, 255} for 8-bit video frames.
            return maps.squeeze().to("cpu").numpy().astype('uint8')*255
        # Reached only if the loader produced no batches.
        return None

    def record(self, video_file_name):
        '''
        Write the frames produced by mapping() to a grayscale video file.

        :param video_file_name: output path; extension should match the codec
        :raises AssertionError: if mapping() returns no frames or frames of
                                an unexpected size
        '''
        fourcc = cv2.VideoWriter_fourcc(*'DIVX')
        video_writer = cv2.VideoWriter(video_file_name,
                                       fourcc,
                                       self.fps,
                                       self.image_shape,
                                       0) # 0 for isColor flag set to False
        # Acquire stacked frames from the mapping func
        maps_stacked = self.mapping()
        assert maps_stacked is not None, "data loader produced no batches"
        assert maps_stacked.shape[1:] == self.image_shape, "invalid frame size"
        try:
            for frame in maps_stacked:
                # VideoWriter expects an explicit channel axis for grayscale.
                frame = frame[..., np.newaxis]
                video_writer.write(frame)
        finally:
            # Always release the writer so the container is finalized.
            video_writer.release()



if __name__ == "__main__":
    # Prefer the GPU when one is available.
    run_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    visualizer = OGMVisualizer(batch_size=1,
                               seq_len=100,
                               device=run_device,
                               dset_path="/data/OGM-datasets/OGM-Turtlebot2/train",
                               train_flag="train")
    visualizer.record("test.avi")