"""
read gqa_objects.h5 and gqa_objects_merged_info.json to get
train36.hdf5, val36.hdf5;
train_ids.pkl, val_ids.pkl;
train36_imgid2idx.pkl, val36_imgid2idx.pkl

Hierarchy of HDF5 file:

{ 'image_features': num_images x num_boxes x 2048 array of features
  'image_bb': num_images x num_boxes x 4 array of bounding boxes }
"""
from __future__ import print_function

import os
import sys

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# import base64
import csv
import h5py
import pickle
import numpy as np
import utils

import get_data_from_info_and_h5

# csv.field_size_limit(sys.maxsize)

# FIELDNAMES = ['image_id', 'image_w', 'image_h', 'num_boxes', 'boxes', 'features']
# infile = 'data/trainval_36/trainval_resnet101_faster_rcnn_genome_36.tsv'
# Output HDF5 files holding per-image features / bounding boxes / spatial features.
train_data_file = '../data_gqa/train36.hdf5'
val_data_file = '../data_gqa/val36.hdf5'
# Pickled dicts mapping image_id -> row index inside the HDF5 datasets.
train_indices_file = '../data_gqa/train36_imgid2idx.pkl'
val_indices_file = '../data_gqa/val36_imgid2idx.pkl'
# Pickled lists of image ids for each split (cached after first run).
train_ids_file = '../data_gqa/train_ids.pkl'
val_ids_file = '../data_gqa/val_ids.pkl'
# Source JSON files the image-id lists are built from.
all_train_data = '../data_gqa/all_train_data.json'
all_val_data = '../data_gqa/all_val_data.json'

# Per-box feature vector length and fixed number of boxes per image
# (matches the dataset shapes created below: N x 36 x 2048).
feature_length = 2048
num_fixed_boxes = 36


def get_spatial_features(image_w, image_h, bboxes):
    """Compute 6-d normalized spatial features for a set of bounding boxes.

    Args:
        image_w: image width in pixels.
        image_h: image height in pixels.
        bboxes: (num_boxes, 4) array of [x1, y1, x2, y2] boxes in pixels.

    Returns:
        (num_boxes, 6) array whose columns are
        [x1/w, y1/h, x2/w, y2/h, box_width/w, box_height/h].
    """
    # Normalize box extents and the top-left corner by the image size.
    # (The original also expanded unscaled box_width/box_height with
    # np.newaxis but never used them — dead code, removed.)
    scaled_width = (bboxes[:, 2] - bboxes[:, 0]) / image_w
    scaled_height = (bboxes[:, 3] - bboxes[:, 1]) / image_h
    scaled_x = bboxes[:, 0] / image_w
    scaled_y = bboxes[:, 1] / image_h

    # np.stack along axis=1 builds the same (num_boxes, 6) layout as the
    # previous per-column newaxis + concatenate dance.
    spatial_features = np.stack(
        (scaled_x,
         scaled_y,
         scaled_x + scaled_width,
         scaled_y + scaled_height,
         scaled_width,
         scaled_height),
        axis=1)
    return spatial_features


def get_data_to_compute_spatial_features(image_id):
    """Fetch the inputs needed by get_spatial_features for one image.

    Returns a (width, height, bboxes) tuple, where width/height come from
    the merged-info JSON and bboxes from the gqa_objects HDF5 loader.
    """
    width, height = get_data_from_info_and_h5.gqa_merged_info_size_loader(image_id)
    boxes = get_data_from_info_and_h5.gqa_objects_bboxes_loader(image_id)
    return width, height, boxes


if __name__ == '__main__':

    def _load_or_make_ids():
        """Load the cached train/val image-id lists, or build and cache them.

        Returns (train_imgids, val_imgids).
        """
        if os.path.exists(train_ids_file) and os.path.exists(val_ids_file):
            # FIX: pickle files must be opened in binary mode; the previous
            # text-mode open() fails under Python 3. Context managers also
            # guarantee the handles are closed.
            with open(train_ids_file, 'rb') as f:
                train_imgids = pickle.load(f)
            print('train_imgids has existed!')
            with open(val_ids_file, 'rb') as f:
                val_imgids = pickle.load(f)
            print('val_imgids has existed!')
        else:
            train_imgids = utils.load_imageid_gqa(all_train_data)
            val_imgids = utils.load_imageid_gqa(all_val_data)
            with open(train_ids_file, 'wb') as f:
                pickle.dump(train_imgids, f)
            print('making train_ids.pkl has ended!')
            with open(val_ids_file, 'wb') as f:
                pickle.dump(val_imgids, f)
            print('making val_ids.pkl has ended!')
        return train_imgids, val_imgids

    def _process_split(h5_file, imgids, indices_file, split_name):
        """Fill one split's HDF5 file and dump its image_id -> row map.

        Creates the three datasets (features, bounding boxes, spatial
        features), writes one row per image id, pickles the index map to
        *indices_file*, and closes *h5_file*.
        """
        n = len(imgids)
        img_features = h5_file.create_dataset(
            'image_features', (n, num_fixed_boxes, feature_length), 'f')
        img_bb = h5_file.create_dataset(
            'image_bb', (n, num_fixed_boxes, 4), 'f')
        spatial_img_features = h5_file.create_dataset(
            'spatial_features', (n, num_fixed_boxes, 6), 'f')

        indices = {}
        for counter, image_id in enumerate(imgids):
            print(counter)
            image_w, image_h, bboxes = get_data_to_compute_spatial_features(image_id)
            image_features = get_data_from_info_and_h5.gqa_objects_feature_loader(image_id)
            spatial_features = get_spatial_features(image_w, image_h, bboxes)

            indices[image_id] = counter
            img_bb[counter, :, :] = bboxes
            img_features[counter, :, :] = image_features
            spatial_img_features[counter, :, :] = spatial_features

        with open(indices_file, 'wb') as f:
            pickle.dump(indices, f)
        print('making %s_indices.pkl has ended!' % split_name)
        h5_file.close()
        print('making %s36.hdf5 has ended!' % split_name)

    train_imgids, val_imgids = _load_or_make_ids()

    h_train = h5py.File(train_data_file, "w")
    h_val = h5py.File(val_data_file, "w")

    _process_split(h_train, train_imgids, train_indices_file, 'train')
    _process_split(h_val, val_imgids, val_indices_file, 'val')

    print("done!")

