import torch
import numpy as np
import torchvision
from torch.utils.data import Dataset, DataLoader
from tensorboardX import SummaryWriter
import os
import os.path as osp
import cv2
import logging
# from torch.utils.tensorboard import SummaryWriter

# Configure the root logger at import time so the dataset-size INFO messages
# below are visible by default. NOTE(review): basicConfig on import is a
# module-level side effect — confirm no consuming application needs to own
# the logging configuration instead.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class OGMMotionCompensatedDataset(Dataset):
    def __init__(self,
                 name,
                 dataset_path,
                 train_flag):
        '''
        Dataset of motion-compensated local occupancy grid maps (OGMs).

        The transformed raw sensor data follows this protocol: every 10
        consecutive files form one sample. History frames live under the
        "in" sub-directory and future frames under "out"; the motion
        compensation described in the SOGMP paper has already been applied
        to the files on disk. (The plain, non-compensated variant of the
        data has no in/out split — see OGMPlainDataset.) Files are named
        "<frame_index>.<ext>" so they can be ordered numerically.

        :param name: the dataset name (kept for bookkeeping only)
        :param dataset_path: path to the dataset root, for example `OGM-Turtlebot2`
        :param train_flag: one of 'train', 'val' or 'test'
        '''
        self.name = name
        self.dataset_path = dataset_path
        self.train_flag = train_flag

        assert train_flag in ['train', 'val', 'test'], "Must be train, val or test"

        self.history_data_path = osp.join(self.dataset_path, self.train_flag, "in")
        self.future_data_path = osp.join(self.dataset_path, self.train_flag, "out")
        # Sort numerically by the frame index encoded in the file name; a plain
        # lexicographic sort would order e.g. "10.png" before "2.png".
        self.in_file_names = sorted(os.listdir(self.history_data_path), key=lambda x: int(x.split('.')[0]))
        self.out_file_names = sorted(os.listdir(self.future_data_path), key=lambda x: int(x.split('.')[0]))
        self.in_file_names = [osp.join(self.history_data_path, i) for i in self.in_file_names]
        self.out_file_names = [osp.join(self.future_data_path, i) for i in self.out_file_names]
        self.total_file_nums = len(self.in_file_names)

        # Constants of the on-disk format: sequences are 10 frames long and
        # each frame is a 64x64 single-channel image (effectively binary,
        # stored as grayscale).
        self.k_seq_len = 10
        self.k_img_shape = (64, 64)

        # Every k_seq_len consecutive files form exactly one sample.
        self.length = self.total_file_nums // self.k_seq_len
        logging.getLogger(__name__).info(f"The total length of the dataset: {self.length}")

    def __len__(self):
        return self.length

    def _start_index(self, idx):
        '''
        Map a sample index to the file index of its first frame, clamped so
        that frames [start, start + k_seq_len) never run past the last file.
        '''
        start = idx * self.k_seq_len
        # <= (not <): a sequence that ends exactly at the last file is valid.
        # The previous strict comparison skipped the final sequence whenever
        # total_file_nums was an exact multiple of k_seq_len.
        if start + self.k_seq_len <= self.total_file_nums:
            return start
        # Fall back one full sequence so indexing never overflows.
        return start - self.k_seq_len

    def __getitem__(self, idx):
        '''
        Return one sample as a dict:
          "hist_seq":   float32 tensor [t=10, c=1, h=64, w=64], history frames
          "future_seq": float32 tensor [t=10, c=1, h=64, w=64], future frames
        Pixel values are normalized from [0, 255] to [0, 1]. (With a
        DataLoader the batched tensors become [b, t, c, h, w].)

        :raises FileNotFoundError: if any frame image cannot be read.
        '''
        in_sequence = np.zeros((self.k_seq_len, *self.k_img_shape), dtype=np.float32)
        out_sequence = np.zeros((self.k_seq_len, *self.k_img_shape), dtype=np.float32)
        idx_s = self._start_index(idx)

        for i in range(self.k_seq_len):
            in_file_name = self.in_file_names[idx_s + i]
            out_file_name = self.out_file_names[idx_s + i]
            in_image = cv2.imread(in_file_name, cv2.IMREAD_GRAYSCALE)
            out_image = cv2.imread(out_file_name, cv2.IMREAD_GRAYSCALE)
            # cv2.imread signals failure by returning None instead of raising.
            if in_image is None or out_image is None:
                bad = in_file_name if in_image is None else out_file_name
                raise FileNotFoundError(f"Failed to read image: {bad}")
            in_sequence[i] = in_image / 255.
            out_sequence[i] = out_image / 255.

        # Add the channel axis -> [seq_len, c=1, h, w].
        in_sequence = torch.from_numpy(in_sequence).unsqueeze(1)
        out_sequence = torch.from_numpy(out_sequence).unsqueeze(1)

        one_batch = {
            "hist_seq": in_sequence,
            "future_seq": out_sequence
        }

        return one_batch


class OGMPlainDataset(Dataset):
    def __init__(self,
                 name,
                 dataset_path,
                 train_flag):
        '''
        Dataset of plain (no motion compensation) local occupancy grid maps.

        All frames of a split live flat in one directory: sample i uses files
        [10*i, 10*i + 10) as the history sequence and the following 10 files
        as the future sequence. Because the future frames look one sequence
        ahead in the same stream, the last chunk cannot start a sample, hence
        the "- 1" in the length. Files are named "<frame_index>.<ext>" so
        they can be ordered numerically.

        :param name: the dataset name (kept for bookkeeping only)
        :param dataset_path: path to the dataset root
        :param train_flag: one of 'train', 'val' or 'test'
        '''
        self.name = name
        self.dataset_path = dataset_path
        self.train_flag = train_flag

        assert train_flag in ['train', 'val', 'test'], "Must be train, val or test"
        self.data_path = osp.join(self.dataset_path, self.train_flag)
        # Sort numerically by the frame index encoded in the file name; a plain
        # lexicographic sort would order e.g. "10.png" before "2.png".
        self.file_names = sorted(os.listdir(self.data_path), key=lambda x: int(x.split('.')[0]))
        self.file_names = [osp.join(self.data_path, i) for i in self.file_names]
        self.total_file_nums = len(self.file_names)

        # Constants of the on-disk format: sequences are 10 frames long and
        # each frame is a 64x64 single-channel image (effectively binary,
        # stored as grayscale).
        self.k_seq_len = 10
        self.k_img_shape = (64, 64)

        # One sample per k_seq_len files, minus one because the future
        # sequence of the last chunk would run past the end of the stream.
        self.length = self.total_file_nums // self.k_seq_len - 1
        logging.getLogger(__name__).info(f"The total length of the dataset is {self.length}")

    def __len__(self):
        return self.length

    def _start_index(self, idx):
        '''
        Map a sample index to the file index of its first history frame,
        clamped so that both the history frames [start, start + k_seq_len)
        and the future frames [start + k_seq_len, start + 2*k_seq_len)
        stay in range.
        '''
        start = idx * self.k_seq_len
        # The future frames are read k_seq_len ahead of the history frames,
        # so a sample needs 2 * k_seq_len frames of headroom. The previous
        # guard only checked the history range and, had it ever fired at
        # idx == 0, would have produced a negative index.
        if start + 2 * self.k_seq_len <= self.total_file_nums:
            return start
        # Fall back one full sequence so indexing never overflows.
        return start - self.k_seq_len

    def __getitem__(self, idx):
        '''
        Return one sample as a dict:
          "hist_seq":   float32 tensor [t=10, c=1, h=64, w=64], history frames
          "future_seq": float32 tensor [t=10, c=1, h=64, w=64], the next
                        10 frames of the same stream
        Pixel values are normalized from [0, 255] to [0, 1]. (With a
        DataLoader the batched tensors become [b, t, c, h, w].)

        :raises FileNotFoundError: if any frame image cannot be read.
        '''
        in_sequence = np.zeros((self.k_seq_len, *self.k_img_shape), dtype=np.float32)
        out_sequence = np.zeros((self.k_seq_len, *self.k_img_shape), dtype=np.float32)
        idx_s = self._start_index(idx)

        for i in range(self.k_seq_len):
            in_file_name = self.file_names[idx_s + i]
            # The future frame is one full sequence ahead in the same stream.
            out_file_name = self.file_names[idx_s + i + self.k_seq_len]
            in_image = cv2.imread(in_file_name, cv2.IMREAD_GRAYSCALE)
            out_image = cv2.imread(out_file_name, cv2.IMREAD_GRAYSCALE)
            # cv2.imread signals failure by returning None instead of raising.
            if in_image is None or out_image is None:
                bad = in_file_name if in_image is None else out_file_name
                raise FileNotFoundError(f"Failed to read image: {bad}")
            in_sequence[i] = in_image / 255.
            out_sequence[i] = out_image / 255.

        # Add the channel axis -> [seq_len, c=1, h, w].
        in_sequence = torch.from_numpy(in_sequence).unsqueeze(1)
        out_sequence = torch.from_numpy(out_sequence).unsqueeze(1)

        one_batch = {
            "hist_seq": in_sequence,
            "future_seq": out_sequence
        }

        return one_batch


if __name__ == "__main__":
    # Visual smoke test: step through the future frames of every batch,
    # one key press per frame.
    dset = OGMPlainDataset("trainset",
                           "OGM-datasets-no-motion-compensation/OGM-Turtlebot2",
                           "train")

    dloader = DataLoader(dset, batch_size=1, shuffle=False, drop_last=True)

    for i, batch in enumerate(dloader):
        hist_seq, future_seq = batch['hist_seq'], batch['future_seq']
        for t in range(future_seq.size(1)):
            # The dataset normalizes pixels to [0, 1]; cv2.imshow interprets
            # floating-point images as [0, 1] (it scales them by 255 itself),
            # so a float array already multiplied by 255 renders saturated
            # white. Convert to uint8 [0, 255] for a correct display.
            frame = (future_seq[0, t].permute(1, 2, 0).numpy() * 255).astype(np.uint8)
            cv2.imshow("test", frame)
            cv2.waitKey()  # block until a key press before the next frame