# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Minimal data reader for GQN TFRecord datasets."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import os
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pprint
nest = tf.contrib.framework.nest

from dps.utils import Param, numpy_print_options, animate, RunningStats
from dps.datasets.base import ImageDataset, ArrayFeature, ImageFeature, IntegerFeature


# Per-dataset metadata. `train_size`/`test_size` are presumably the number of
# tfrecord shard files in each split (the "5000 scenes per file" note below
# supports this) — TODO confirm. `frame_size` is the side length of the square
# frames in pixels; `sequence_size` is the number of frames stored per scene.
DatasetInfo = collections.namedtuple(
    'DatasetInfo',
    ['basepath', 'train_size', 'test_size', 'frame_size', 'sequence_size']
)


_DATASETS = dict(
    jaco=DatasetInfo(
        basepath='jaco',
        train_size=3600,
        test_size=400,
        frame_size=64,
        sequence_size=11),

    mazes=DatasetInfo(
        basepath='mazes',
        train_size=1080,
        test_size=120,
        frame_size=84,
        sequence_size=300),

    rooms_free_camera_with_object_rotations=DatasetInfo(
        basepath='rooms_free_camera_with_object_rotations',
        train_size=2034,
        test_size=226,
        frame_size=128,
        sequence_size=10),

    rooms_ring_camera=DatasetInfo(
        basepath='rooms_ring_camera',
        train_size=2160,
        test_size=240,
        frame_size=64,
        sequence_size=10),

    # 5000 scenes per file.
    # NOTE(review): train_size was reduced from 2160 to 1, apparently for
    # local debugging — restore the commented value before real use.
    rooms_free_camera_no_object_rotations=DatasetInfo(
        basepath='rooms_free_camera_no_object_rotations',
        train_size=1,
        # train_size=2160,
        test_size=240,
        frame_size=64,
        sequence_size=10),

    shepard_metzler_5_parts=DatasetInfo(
        basepath='shepard_metzler_5_parts',
        train_size=900,
        test_size=100,
        frame_size=64,
        sequence_size=15),

    shepard_metzler_7_parts=DatasetInfo(
        basepath='shepard_metzler_7_parts',
        train_size=900,
        test_size=100,
        frame_size=64,
        sequence_size=15)
)
_NUM_CHANNELS = 3  # frames are RGB
# Per-frame raw camera record: position (3) + yaw + pitch — see the
# commented-out slicing in DataReader._preprocess_cameras.
_NUM_RAW_CAMERA_PARAMS = 5
_MODES = ('train', 'test')


def _get_dataset_files(dataset_info, mode, root, file_range):
    """Generates lists of files for a given dataset version."""
    basepath = dataset_info.basepath
    base = os.path.join(root, basepath, mode)
    files = [
        (int(f.split('-')[0]), os.path.join(base, f))
        for f in os.listdir(base) if f.endswith('.tfrecord')]
    files = {idx: f for idx, f in files}

    file_indices = list(range(*file_range))

    files = [files[i] for i in file_indices]

    print("Files after applying file_range:")
    pprint.pprint(files)

    return files


def _convert_frame_data(jpeg_data):
    """Decode a JPEG-encoded string tensor and rescale it to float32 in [0, 1]."""
    return tf.image.convert_image_dtype(
        tf.image.decode_jpeg(jpeg_data), dtype=tf.float32)


class DataReader(object):
    """Minimal queue based TFRecord reader.

    You can use this reader to load the datasets used to train Generative Query
    Networks (GQNs) in the 'Neural Scene Representation and Rendering' paper.
    See README.md for a description of the datasets and an example of how to use
    the reader.
    """

    def __init__(self,
                 dataset,
                 root,
                 mode='train',
                 file_range=slice(None),
                 # Optionally reshape frames
                 custom_frame_size=None,
                 # Queue params
                 num_threads=4,
                 capacity=256,
                 min_after_dequeue=128,
                 seed=None):
        """Instantiates a DataReader object and sets up queues for data reading.

        Args:
            dataset: string, one of ['jaco', 'mazes', 'rooms_ring_camera',
                    'rooms_free_camera_no_object_rotations',
                    'rooms_free_camera_with_object_rotations', 'shepard_metzler_5_parts',
                    'shepard_metzler_7_parts'].
            root: string, path to the root folder of the data.
            mode: (optional) string, one of ['train', 'test'].
            file_range: (optional) restricts which tfrecord shard files are
                    read; forwarded unmodified to `_get_dataset_files`.
            custom_frame_size: (optional) integer, required size of the returned
                    frames, defaults to None.
            num_threads: (optional) integer, number of threads used to feed the reader
                    queues, defaults to 4.
            capacity: (optional) integer, capacity of the underlying
                    RandomShuffleQueue, defaults to 256.
            min_after_dequeue: (optional) integer, min_after_dequeue of the underlying
                    RandomShuffleQueue, defaults to 128.
            seed: (optional) integer, seed for the random number generators used in
                    the reader.

        Raises:
            ValueError: if `dataset` or `mode` is not recognized.
        """
        if dataset not in _DATASETS:
            raise ValueError('Unrecognized dataset {} requested. Available datasets '
                             'are {}'.format(dataset, _DATASETS.keys()))

        if mode not in _MODES:
            raise ValueError('Unsupported mode {} requested. Supported modes are {}'.format(mode, _MODES))

        self._dataset_info = _DATASETS[dataset]

        # The full sequence of every scene is kept, so the "randomized indices"
        # drawn in `_get_randomized_indices` amount to a random permutation of
        # all frames rather than a subsample.
        self._example_size = self._dataset_info.sequence_size
        self._custom_frame_size = custom_frame_size

        with tf.device('/cpu'):
            file_names = _get_dataset_files(self._dataset_info, mode, root, file_range)

            # num_epochs=1: each shard file is visited exactly once.
            filename_queue = tf.train.string_input_producer(file_names, seed=seed, num_epochs=1)
            reader = tf.TFRecordReader()

            # One parse pipeline per thread; each yields batched (frames,
            # cameras) tensors for up to 16 records (see `_make_read_op`).
            read_ops = [self._make_read_op(reader, filename_queue) for _ in range(num_threads)]

            dtypes = nest.map_structure(lambda x: x.dtype, read_ops[0])
            shapes = nest.map_structure(lambda x: x.shape[1:], read_ops[0])

            # Outputs of all read ops are shuffled together before being
            # handed out by `read`.
            self._queue = tf.RandomShuffleQueue(
                capacity=capacity,
                min_after_dequeue=min_after_dequeue,
                dtypes=dtypes,
                shapes=shapes,
                seed=seed)

            enqueue_ops = [self._queue.enqueue_many(op) for op in read_ops]
            tf.train.add_queue_runner(tf.train.QueueRunner(self._queue, enqueue_ops))

    def read(self, batch_size):
        """Dequeues and returns a (frames, cameras) batch from the shuffle queue."""
        frames, cameras = self._queue.dequeue_many(batch_size)
        return frames, cameras

    def _make_read_op(self, reader, filename_queue):
        """Instantiates the ops used to read and parse the data into tensors."""
        # Read up to 16 serialized scenes at a time from the filename queue.
        _, raw_data = reader.read_up_to(filename_queue, num_records=16)
        feature_map = {
            # One JPEG-encoded string per frame in the sequence.
            'frames': tf.FixedLenFeature(
                shape=self._dataset_info.sequence_size, dtype=tf.string),
            # Flat float vector: _NUM_RAW_CAMERA_PARAMS values per frame.
            'cameras': tf.FixedLenFeature(
                shape=[self._dataset_info.sequence_size * _NUM_RAW_CAMERA_PARAMS],
                dtype=tf.float32)
        }
        example = tf.parse_example(raw_data, feature_map)
        # The same index permutation is applied to frames and cameras so they
        # stay aligned.
        indices = self._get_randomized_indices()
        frames = self._preprocess_frames(example, indices)
        cameras = self._preprocess_cameras(example, indices)
        return frames, cameras

    def _get_randomized_indices(self):
        """Generates randomized indices into a sequence of a specific length.

        Since `_example_size == sequence_size` (set in `__init__`), this is a
        random permutation of the whole sequence rather than a subsample.
        """
        indices = tf.range(0, self._dataset_info.sequence_size)
        indices = tf.random_shuffle(indices)
        indices = tf.slice(indices, begin=[0], size=[self._example_size])
        return indices

    def _preprocess_frames(self, example, indices):
        """Instantiates the ops used to preprocess the frames data.

        Decodes the per-frame JPEG strings to float32 images in [0, 1] and,
        if `custom_frame_size` differs from the native frame size, resizes
        them. Returns a (batch, example_size, H, W, channels) tensor.
        """
        frames = tf.concat(example['frames'], axis=0)
        frames = tf.gather(frames, indices, axis=1)  # reorder frames per scene
        # Decode each JPEG string independently; back_prop=False because this
        # is pure input preprocessing.
        frames = tf.map_fn(
            _convert_frame_data, tf.reshape(frames, [-1]),
            dtype=tf.float32, back_prop=False)
        dataset_image_dimensions = tuple([self._dataset_info.frame_size] * 2 + [_NUM_CHANNELS])
        frames = tf.reshape(frames, (-1, self._example_size) + dataset_image_dimensions)

        if (self._custom_frame_size and self._custom_frame_size != self._dataset_info.frame_size):

            # Collapse batch and time axes, resize, then restore them.
            frames = tf.reshape(frames, (-1,) + dataset_image_dimensions)
            new_frame_dimensions = (self._custom_frame_size,) * 2 + (_NUM_CHANNELS,)
            frames = tf.image.resize_bilinear(frames, new_frame_dimensions[:2], align_corners=True)
            frames = tf.reshape(frames, (-1, self._example_size) + new_frame_dimensions)
        return frames

    def _preprocess_cameras(self, example, indices):
        """Instantiates the ops used to preprocess the cameras data.

        Returns the raw camera parameters reshaped to
        (batch, example_size, _NUM_RAW_CAMERA_PARAMS); presumably each row is
        (x, y, z, yaw, pitch) — see the commented-out conversion below and the
        slicing in `GQN_Dataset._make`.
        """
        raw_pose_params = example['cameras']
        raw_pose_params = tf.reshape(
            raw_pose_params,
            [-1, self._dataset_info.sequence_size, _NUM_RAW_CAMERA_PARAMS])
        raw_pose_params = tf.gather(raw_pose_params, indices, axis=1)

        # pos = raw_pose_params[:, :, 0:3]
        # yaw = raw_pose_params[:, :, 3:4]
        # pitch = raw_pose_params[:, :, 4:5]
        # cameras = tf.concat(
        #     [pos, tf.sin(yaw), tf.cos(yaw), tf.sin(pitch), tf.cos(pitch)], axis=2)

        return raw_pose_params


class GQN_Dataset(ImageDataset):
    """Converts a GQN tfrecord dataset (read via `DataReader`) into the dps
    `ImageDataset` on-disk format, storing per-example frame sequences with
    camera rotations and translations.
    """

    dataset_name = Param()  # key into `_DATASETS`
    data_root_path = Param()  # root folder holding the GQN datasets on disk
    file_range = Param()  # which tfrecord shards to read (see `_get_dataset_files`)
    mode = Param()  # train or test

    n_examples = Param()  # number of examples to write out
    read_batch_size = Param()  # batch size used when pulling from the DataReader

    _obs_shape = None  # lazily computed cache for the `obs_shape` property
    depth = 3  # RGB channels

    # Names of the statistics returned by `_make` as artifacts.
    _artifact_names = ['pose_t_mean', 'pose_t_std']
    # Rotation convention for the stored `pose_r` feature.
    angle_order = ['yaw', 'pitch', 'roll']
    angle_units = 'rad'

    @property
    def features(self):
        """Feature schema: image sequence, rotations, translations, example index."""
        if self._features is None:
            self._features = [
                ImageFeature("image", self.obs_shape),
                ArrayFeature("pose_r", (self.n_frames, 3,)),
                ArrayFeature("pose_t", (self.n_frames, 3,)),
                IntegerFeature("idx"),
            ]

        return self._features

    @property
    def obs_shape(self):
        """Shape of one stored image sequence: (n_frames, H, W, depth).

        NOTE(review): `postprocessing`, `tile_shape`, `image_shape` and
        `n_frames` are presumably Params declared on `ImageDataset` — confirm
        in dps.
        """
        if self._obs_shape is None:
            if self.postprocessing:
                self._obs_shape = (self.n_frames, *self.tile_shape, self.depth,)
            else:
                self._obs_shape = (self.n_frames, *self.image_shape, self.depth,)

        return self._obs_shape

    def _make(self):
        """Reads scenes with a `DataReader` and writes the first `n_examples`
        via `_write_example`, returning translation mean/std as artifacts.
        """
        graph = tf.Graph()

        with graph.as_default(), tf.device("/cpu:0"):
            dataset_info = _DATASETS[self.dataset_name]
            assert 0 < self.n_frames <= dataset_info.sequence_size

            # DataReader only supports a single (square) custom frame size.
            assert self.image_shape[0] == self.image_shape[1]

            reader = DataReader(
                self.dataset_name,
                root=self.data_root_path,
                mode=self.mode,
                file_range=self.file_range,
                num_threads=1,
                custom_frame_size=self.image_shape[0],
            )

            data = reader.read(self.read_batch_size)

            n_examples = 0
            pose_t_stats = RunningStats()

            # NOTE(review): if the reader is exhausted (num_epochs=1) before
            # n_examples are written, sess.run presumably raises
            # OutOfRangeError rather than reaching the "Too few examples"
            # check below — confirm intended.
            with tf.train.SingularMonitoredSession() as sess:
                while True:
                    frames, cameras = sess.run(data)
                    b = frames.shape[0]

                    # In their coordinate system, a yaw of 0 looks down the negative x-axis, whereas
                    # our code assumes a yaw of 0 looks down the positive x-axis. So just add pi to the yaw.
                    yaw = cameras[..., 3:4] + np.pi
                    pitch = cameras[..., 4:5]
                    roll = np.zeros_like(pitch)  # raw data carries no roll; fill with zeros
                    pose_r = np.concatenate([yaw, pitch, roll], axis=-1)

                    pose_t = cameras[..., :3]

                    # Translation stats accumulate over every frame read, even
                    # frames/examples that are not written out.
                    pose_t_stats.add(pose_t.reshape(-1, pose_t.shape[-1]))

                    for i in range(min(b, self.n_examples-n_examples)):
                        # Reader yields floats in [0, 1]; store as uint8 images.
                        self._write_example(
                            image=np.uint8(255. * frames[i, :self.n_frames]),
                            pose_r=pose_r[i, :self.n_frames],
                            pose_t=pose_t[i, :self.n_frames],
                            idx=n_examples
                        )
                        n_examples += 1

                        if n_examples % 100 == 0:
                            print("Processing example {}".format(n_examples))

                    if n_examples >= self.n_examples:
                        break

        if n_examples < self.n_examples:
            raise Exception("Too few examples. Found {}, wanted {}.".format(n_examples, self.n_examples))

        pose_t_mean, pose_t_var = pose_t_stats.get_stats()
        artifacts = dict(
            pose_t_mean=pose_t_mean,
            pose_t_std=np.sqrt(pose_t_var),
        )
        return artifacts

    def visualize(self, n=4):
        """Renders `n` sampled examples as an animation, saves it to
        'gqn_visualization.mp4', and displays it with matplotlib.
        """
        sample = self.sample(n)
        images = sample["image"]
        pose_r = sample["pose_r"]
        pose_t = sample["pose_t"]
        indices = sample["idx"]

        # One text overlay per (example, frame) pair.
        with numpy_print_options(precision=2):
            text = [
                ["idx={}\nt={}\npose_r={}\npose_t={}".format(i, t, _pr, _pt)
                 for t, (_pr, _pt) in enumerate(zip(pr, pt))]
                for i, pr, pt in zip(indices, pose_r, pose_t)
            ]

        fig, _, anim, _ = animate(images, text=text, fig_unit_size=4)

        path = 'gqn_visualization.mp4'
        anim.save(path, writer='ffmpeg', codec='hevc', extra_args=['-preset', 'ultrafast'])

        plt.show()
        plt.close(fig)


if __name__ == "__main__":
    from dps import cfg
    from dps.utils import Config

    # Default configuration for a quick local run; any entry may be
    # overridden from the command line below.
    params = dict(
        dataset_name='rooms_ring_camera',
        # dataset_name='rooms_free_camera_no_object_rotations',
        data_root_path='/media/data/gqn-dataset',
        file_range=(2, 3),
        mode='train',
        n_examples=100,
        read_batch_size=2,
        n_frames=10,
        N=16,
    )
    config = Config(**params)
    config.update_from_command_line()

    with config:
        dataset = GQN_Dataset(_no_cache=True)

        session = tf.Session()
        with session.as_default():
            dataset.visualize(cfg.N)
