from __future__ import division
import os
import random
import tensorflow as tf

class DataLoader(object):
    def __init__(self,
                 dataset_dir=None,
                 filenames_file = None,
                 batch_size=None,
                 img_height=None,
                 img_width=None,
                 num_source=None,
                 num_scales=None):
        """Store the loader configuration verbatim on the instance.

        Every argument is kept under an attribute of the same name; no
        validation or I/O happens here.
        """
        config = {
            'dataset_dir': dataset_dir,
            'filenames_file': filenames_file,
            'batch_size': batch_size,
            'img_height': img_height,
            'img_width': img_width,
            'num_source': num_source,
            'num_scales': num_scales,
        }
        for attr, value in config.items():
            setattr(self, attr, value)

    def load_train_batch(self):
        """Build the queue-based input pipeline for one training batch.

        Returns:
            (image_all, intrinsics, rel_pose) where ``image_all`` is the
            batch of edge-filtered image sequences, ``intrinsics`` the
            multi-scale camera matrices and ``rel_pose`` the float32
            ground-truth relative poses.
        Side effects:
            Sets ``self.steps_per_epoch``.
        """
        with tf.name_scope('load_train_batch'):
            # One shared seed keeps the three shuffled queues (images,
            # intrinsics, poses) aligned with each other.
            seed = random.randint(0, 2 ** 31 - 1)
            # Load the list of training files into queues
            file_list = self.format_file_list(self.dataset_dir, 'train')
            image_paths_queue = tf.train.string_input_producer(
                file_list['image_file_list'],
                seed=seed,
                shuffle=True)
            cam_paths_queue = tf.train.string_input_producer(
                file_list['cam_file_list'],
                seed=seed,
                shuffle=True)
            rel_pose_paths_queue = tf.train.string_input_producer(
                file_list['rel_pose__file_list'],
                seed=seed,
                shuffle=True)
            self.steps_per_epoch = int(
                len(file_list['image_file_list']) // self.batch_size)

            # Load and decode one whole image-sequence file per read.
            img_reader = tf.WholeFileReader()
            _, image_contents = img_reader.read(image_paths_queue)
            image_seq_raw = tf.image.decode_jpeg(image_contents)

            # Convert uint8 pixels to float32 for the convolution below.
            image_show = tf.image.convert_image_dtype(image_seq_raw,
                                                      dtype=tf.float32)
            image_batch = tf.expand_dims(image_show, 0)

            # 3x3 edge-detection (Laplacian-style) kernel, applied to each
            # RGB channel independently (block-diagonal over channels).
            kernel = tf.constant([
                [
                    [[-1., 0., 0.], [0., -1., 0.], [0., 0., -1.]],
                    [[-1., 0., 0.], [0., -1., 0.], [0., 0., -1.]],
                    [[-1., 0., 0.], [0., -1., 0.], [0., 0., -1.]]
                ],
                [
                    [[-1., 0., 0.], [0., -1., 0.], [0., 0., -1.]],
                    [[8., 0., 0.], [0., 8., 0.], [0., 0., 8.]],
                    [[-1., 0., 0.], [0., -1., 0.], [0., 0., -1.]]
                ],
                [
                    [[-1., 0., 0.], [0., -1., 0.], [0., 0., -1.]],
                    [[-1., 0., 0.], [0., -1., 0.], [0., 0., -1.]],
                    [[-1., 0., 0.], [0., -1., 0.], [0., 0., -1.]]
                ]
            ])

            conv2d = tf.nn.conv2d(image_batch, kernel, [1, 1, 1, 1], padding='SAME')
            # ReLU then clamp to [0, 1] so the filtered result is a valid image.
            activation_map = tf.minimum(tf.nn.relu(conv2d), 1)
            encoded_image = tf.squeeze(activation_map, axis=0)
            image_seq = tf.image.convert_image_dtype(encoded_image, dtype=tf.uint8)

            # Split the tiled sequence into per-frame tiles stacked on channels.
            image_seq_concat = \
                self.unpack_image_sequence(
                    image_seq, self.img_height, self.img_width, self.num_source)

            # Load camera intrinsics: one CSV line of 9 floats -> 3x3 matrix.
            cam_reader = tf.TextLineReader()
            _, raw_cam_contents = cam_reader.read(cam_paths_queue)
            rec_def = [[1.] for _ in range(9)]
            raw_cam_vec = tf.decode_csv(raw_cam_contents,
                                        record_defaults=rec_def)
            raw_cam_vec = tf.stack(raw_cam_vec)
            intrinsics = tf.reshape(raw_cam_vec, [3, 3])

            # Load the ground-truth relative poses (6-DoF per source frame).
            pose_reader = tf.WholeFileReader()
            _, pose_contents = pose_reader.read(rel_pose_paths_queue)
            split_line = tf.string_split([pose_contents], delimiter=' \n').values
            rel_pose = tf.reshape(split_line, [self.num_source, 6], name='rel_pose')
            # BUG FIX: convert string tokens to float32, matching the
            # validation pipeline; previously the batched training poses
            # stayed tf.string tensors.
            rel_pose = tf.string_to_number(rel_pose, out_type=tf.float32)

            # Form training batches
            image_seq_concat, intrinsics, rel_pose = \
                tf.train.batch([image_seq_concat, intrinsics, rel_pose],
                               batch_size=self.batch_size)

            image_all = image_seq_concat

            # Random scale + crop; intrinsics are adjusted consistently
            # with the geometric transforms.
            image_all, intrinsics = self.data_augmentation(
                image_all, intrinsics, self.img_height, self.img_width)

            intrinsics = self.get_multi_scale_intrinsics(
                intrinsics, self.num_scales)
            return image_all, intrinsics, rel_pose

    def load_val_batch(self):
        """Build the queue-based input pipeline for one validation batch.

        Returns (image_all, intrinsics, rel_pose); also sets
        ``self.steps_per_epoch_val``. Mirrors ``load_train_batch`` but reads
        the 'val' split.
        """
        with tf.name_scope('load_val_batch'):
            # One shared seed keeps the three shuffled queues (images,
            # intrinsics, poses) aligned with each other.
            seed = random.randint(0, 2 ** 31 - 1)
            # Load the list of validation files into queues
            file_list = self.format_file_list(self.dataset_dir, 'val')
            image_paths_queue = tf.train.string_input_producer(
                file_list['image_file_list'],
                seed=seed,
                shuffle=True)
            cam_paths_queue = tf.train.string_input_producer(
                file_list['cam_file_list'],
                seed=seed,
                shuffle=True)
            rel_pose_path_quene = tf.train.string_input_producer(
                file_list['rel_pose__file_list'],
                seed=seed,
                shuffle=True)
            self.steps_per_epoch_val = int(
                len(file_list['image_file_list']) // self.batch_size)

            # Load images
            img_reader = tf.WholeFileReader()
            _, image_contents = img_reader.read(image_paths_queue)  # NOTE(review): reads the whole sequence file in one shot?
            image_seq_raw = tf.image.decode_jpeg(image_contents)

            image_show = tf.image.convert_image_dtype(image_seq_raw,
                                                      dtype=tf.float32)  # most TF ops expect float; images decode as uint8
            image_batch = tf.expand_dims(image_show, 0)

            # 3x3 edge-detection (Laplacian-style) kernel, applied to each
            # RGB channel independently (block-diagonal over channels).
            kernel = tf.constant([
                [
                    [[-1., 0., 0.], [0., -1., 0.], [0., 0., -1.]],
                    [[-1., 0., 0.], [0., -1., 0.], [0., 0., -1.]],
                    [[-1., 0., 0.], [0., -1., 0.], [0., 0., -1.]]
                ],
                [
                    [[-1., 0., 0.], [0., -1., 0.], [0., 0., -1.]],
                    [[8., 0., 0.], [0., 8., 0.], [0., 0., 8.]],
                    [[-1., 0., 0.], [0., -1., 0.], [0., 0., -1.]]
                ],
                [
                    [[-1., 0., 0.], [0., -1., 0.], [0., 0., -1.]],
                    [[-1., 0., 0.], [0., -1., 0.], [0., 0., -1.]],
                    [[-1., 0., 0.], [0., -1., 0.], [0., 0., -1.]]
                ]
            ])

            conv2d = tf.nn.conv2d(image_batch, kernel, [1, 1, 1, 1], padding='SAME')
            activation_map = tf.minimum(tf.nn.relu(conv2d), 1)  # ReLU then clamp to [0, 1] so the result is a valid image

            encoded_image = tf.squeeze(activation_map, axis=0)
            image_seq = tf.image.convert_image_dtype(encoded_image, dtype=tf.uint8)

            # Split the tiled sequence into per-frame tiles stacked on channels.
            image_seq_concat = \
                self.unpack_image_sequence(
                    image_seq, self.img_height, self.img_width, self.num_source)

            # Load camera intrinsics: one CSV line of 9 floats -> 3x3 matrix.
            cam_reader = tf.TextLineReader()
            _, raw_cam_contents = cam_reader.read(cam_paths_queue)
            rec_def = []
            for i in range(9):
                rec_def.append([1.])
            raw_cam_vec = tf.decode_csv(raw_cam_contents,
                                        record_defaults=rec_def)
            raw_cam_vec = tf.stack(raw_cam_vec)
            intrinsics = tf.reshape(raw_cam_vec, [3, 3])

            # Load the ground-truth relative poses (6-DoF per source frame).
            pose_reader = tf.WholeFileReader()
            _, pose_contents = pose_reader.read(rel_pose_path_quene)
            split_line = tf.string_split([pose_contents], delimiter=' \n').values
            rel_pose = tf.reshape(split_line, [self.num_source, 6], name='rel_pose')
            # Convert string tokens to numeric pose values.
            rel_pose = tf.string_to_number(rel_pose, out_type=tf.float32)

            # Form validation batches.
            image_seq_concat, intrinsics, rel_pose = \
                tf.train.batch([image_seq_concat, intrinsics, rel_pose],
                               batch_size=self.batch_size)

            image_all = image_seq_concat

            # Random scale + crop; intrinsics are adjusted to match.
            image_all, intrinsics = self.data_augmentation(
                image_all, intrinsics, self.img_height, self.img_width)

            intrinsics = self.get_multi_scale_intrinsics(
                intrinsics, self.num_scales)
            return image_all, intrinsics, rel_pose

    def make_intrinsics_matrix(self, fx, fy, cx, cy):
        """Assemble a batch of 3x3 camera intrinsics matrices.

        fx, fy, cx, cy are batched scalars (shape [B]); the result has
        shape [B, 3, 3] with the usual [[fx, 0, cx], [0, fy, cy],
        [0, 0, 1]] layout per batch element.
        """
        batch_size = fx.get_shape().as_list()[0]
        zeros = tf.zeros_like(fx)
        row_focal_x = tf.stack([fx, zeros, cx], axis=1)
        row_focal_y = tf.stack([zeros, fy, cy], axis=1)
        # The homogeneous row is constant; tile it across the batch.
        row_last = tf.tile(tf.constant([0., 0., 1.], shape=[1, 3]),
                           [batch_size, 1])
        return tf.stack([row_focal_x, row_focal_y, row_last], axis=1)

    def data_augmentation(self, im, intrinsics, out_h, out_w):
        """Randomly scale then crop a batch of images, keeping the camera
        intrinsics consistent with each geometric transform.

        im: batched image tensor [B, H, W, C]; intrinsics: [B, 3, 3];
        out_h/out_w: output crop size. Returns (im, intrinsics) where the
        images are cast back to uint8.
        """
        # Random scaling
        def random_scaling(im, intrinsics):
            batch_size, in_h, in_w, _ = im.get_shape().as_list()
            # Up-scale by a factor in [1, 1.15) per axis so a subsequent
            # crop back to (out_h, out_w) is always possible.
            scaling = tf.random_uniform([2], 1, 1.15)
            x_scaling = scaling[0]
            y_scaling = scaling[1]
            out_h = tf.cast(in_h * y_scaling, dtype=tf.int32)
            out_w = tf.cast(in_w * x_scaling, dtype=tf.int32)
            im = tf.image.resize_area(im, [out_h, out_w])
            # Focal lengths and principal point scale with the resize.
            fx = intrinsics[:,0,0] * x_scaling
            fy = intrinsics[:,1,1] * y_scaling
            cx = intrinsics[:,0,2] * x_scaling
            cy = intrinsics[:,1,2] * y_scaling
            intrinsics = self.make_intrinsics_matrix(fx, fy, cx, cy)
            return im, intrinsics

        # Random cropping
        def random_cropping(im, intrinsics, out_h, out_w):
            # batch_size, in_h, in_w, _ = im.get_shape().as_list()
            # Dynamic shape: the preceding random resize makes H/W unknown
            # at graph-construction time.
            batch_size, in_h, in_w, _ = tf.unstack(tf.shape(im))
            offset_y = tf.random_uniform([1], 0, in_h - out_h + 1, dtype=tf.int32)[0]
            offset_x = tf.random_uniform([1], 0, in_w - out_w + 1, dtype=tf.int32)[0]
            im = tf.image.crop_to_bounding_box(
                im, offset_y, offset_x, out_h, out_w)
            # Cropping shifts only the principal point, not the focal lengths.
            fx = intrinsics[:,0,0]
            fy = intrinsics[:,1,1]
            cx = intrinsics[:,0,2] - tf.cast(offset_x, dtype=tf.float32)
            cy = intrinsics[:,1,2] - tf.cast(offset_y, dtype=tf.float32)
            intrinsics = self.make_intrinsics_matrix(fx, fy, cx, cy)
            return im, intrinsics

        # Random coloring — defined but currently disabled: the tf.cond
        # call below that would invoke it is commented out.
        def random_coloring(im):
            batch_size, in_h, in_w, in_c = im.get_shape().as_list()
            im_f = tf.image.convert_image_dtype(im, tf.float32)

            # randomly shift gamma
            random_gamma = tf.random_uniform([], 0.8, 1.2)
            im_aug  = im_f  ** random_gamma

            # randomly shift brightness
            random_brightness = tf.random_uniform([], 0.5, 2.0)
            im_aug  =  im_aug * random_brightness

            # randomly shift color
            random_colors = tf.random_uniform([in_c], 0.8, 1.2)
            white = tf.ones([batch_size, in_h, in_w])
            color_image = tf.stack([white * random_colors[i] for i in range(in_c)], axis=3)
            im_aug  *= color_image

            # saturate
            im_aug  = tf.clip_by_value(im_aug,  0, 1)

            im_aug = tf.image.convert_image_dtype(im_aug, tf.uint8)

            return im_aug

        im, intrinsics = random_scaling(im, intrinsics)
        im, intrinsics = random_cropping(im, intrinsics, out_h, out_w)
        # resize_area returns floats; restore the uint8 image dtype.
        im = tf.cast(im, dtype=tf.uint8)
        # do_augment = tf.random_uniform([], 0, 1)
        # im = tf.cond(do_augment > 0.5, lambda: random_coloring(im), lambda: im)
        return im, intrinsics

    def format_file_list(self, data_root, split):
        """Build the file paths for the image/intrinsics/pose files of a split.

        Reads ``<data_root>/<split>.txt`` where each line is
        ``"<subfolder> <frame_id>"`` and returns a dict with keys
        ``image_file_list``, ``cam_file_list`` and ``rel_pose__file_list``
        (key names are relied on by load_train_batch / load_val_batch).
        """
        with open(os.path.join(data_root, '%s.txt' % split), 'r') as f:
            # BUG FIX: rstrip('\n') instead of slicing off the last char,
            # so a missing trailing newline on the final line no longer
            # truncates that frame id.
            frames = [line.rstrip('\n') for line in f]
        subfolders = [x.split(' ')[0] for x in frames]
        frame_ids = [x.split(' ')[1] for x in frames]
        image_file_list = [os.path.join(data_root, sub, fid + '.jpg')
                           for sub, fid in zip(subfolders, frame_ids)]
        cam_file_list = [os.path.join(data_root, sub, fid + '_cam.txt')
                         for sub, fid in zip(subfolders, frame_ids)]
        rel_pose__file_list = [os.path.join(data_root, sub, fid + '_gt_rel_pose.txt')
                               for sub, fid in zip(subfolders, frame_ids)]
        return {
            'image_file_list': image_file_list,
            'cam_file_list': cam_file_list,
            'rel_pose__file_list': rel_pose__file_list,
        }

    # def unpack_image_sequence(self, image_seq, img_height, img_width, num_source):
    #
    #     # Assuming the center image is the target frame
    #     tgt_start_idx = int(img_width * (num_source//2))
    #     tgt_image = tf.slice(image_seq,
    #                          [0, tgt_start_idx, 0],
    #                          [-1, img_width, -1])
    #     # Source frames before the target frame
    #     src_image_1 = tf.slice(image_seq,
    #                            [0, 0, 0],
    #                            [-1, int(img_width * (num_source//2)), -1])
    #     # Source frames after the target frame
    #     src_image_2 = tf.slice(image_seq,
    #                            [0, int(tgt_start_idx + img_width), 0],
    #                            [-1, int(img_width * (num_source//2)), -1])
    #     src_image_seq = tf.concat([src_image_1, src_image_2], axis=1)
    #     # Stack source frames along the color channels (i.e. [H, W, N*3])
    #     src_image_stack = tf.concat([tf.slice(src_image_seq,
    #                                 [0, i*img_width, 0],
    #                                 [-1, img_width, -1])
    #                                 for i in range(num_source)], axis=2)
    #     src_image_stack.set_shape([img_height,
    #                                img_width,
    #                                num_source * 3])
    #     tgt_image.set_shape([img_height, img_width, 3])
    #
    #
    #     print('image')
    #
    #     return tgt_image, src_image_stack

    def unpack_image_sequence(self, image_seq, img_height, img_width, num_source):
        """Cut a tiled image sequence into frame tiles stacked on channels.

        The input appears to be laid out as a ``num_source x 2`` grid of
        ``img_height x img_width`` RGB tiles; the output stacks the tiles
        in row-major order along the channel axis, giving shape
        ``[img_height, img_width, num_source * 2 * 3]``.
        """
        tiles = [
            tf.slice(image_seq,
                     [row * img_height, col * img_width, 0],
                     [img_height, img_width, 3])
            for row in range(num_source)
            for col in range(2)
        ]
        return tf.concat(tiles, axis=2)

    def batch_unpack_image_sequence(self, image_seq, img_height, img_width, num_source):
        """Batched counterpart of ``unpack_image_sequence``.

        Slices a ``num_source x 2`` grid of ``img_height x img_width`` RGB
        tiles out of each batch element and concatenates the tiles in
        row-major order along the channel axis, giving shape
        ``[B, img_height, img_width, num_source * 2 * 3]``.
        """
        tiles = [
            tf.slice(image_seq,
                     [0, row * img_height, col * img_width, 0],
                     [-1, img_height, img_width, 3])
            for row in range(num_source)
            for col in range(2)
        ]
        return tf.concat(tiles, axis=3)

    def get_multi_scale_intrinsics(self, intrinsics, num_scales):
        """Return intrinsics rescaled for each pyramid level.

        Scale ``s`` divides focal lengths and principal point by ``2**s``;
        the result has shape [B, num_scales, 3, 3].
        """
        per_scale = []
        for s in range(num_scales):
            factor = 2 ** s
            per_scale.append(self.make_intrinsics_matrix(
                intrinsics[:, 0, 0] / factor,
                intrinsics[:, 1, 1] / factor,
                intrinsics[:, 0, 2] / factor,
                intrinsics[:, 1, 2] / factor))
        return tf.stack(per_scale, axis=1)