import os
from abc import ABC

import numpy as np
import random
from functools import reduce
import torch
import torch.utils.data as data
import json
import h5py
from PIL import Image, ImageFile
from torchvision import transforms
from misc import utils
ImageFile.LOAD_TRUNCATED_IMAGES = True


class DataLoader(data.Dataset):
    """Caption dataset with split-aware sequential prefetching.

    Despite the name, this is a ``torch.utils.data.Dataset`` (the name shadows
    ``torch.utils.data.DataLoader``). It serves images, precomputed fc/att
    features, relative box coordinates and caption labels, and keeps one
    ``BlobFetcher`` per split ('train'/'val'/'test') to iterate each split
    independently via :meth:`get_batch`.
    """

    def reset_iterator(self, split):
        # Drop and rebuild the prefetcher for this split so iteration
        # restarts from position 0 (train split is reshuffled by the fetcher).
        del self._prefetch_process[split]
        self._prefetch_process[split] = BlobFetcher(split, self, split == 'train')
        self.iterators[split] = 0

    def get_vocab(self):
        """Return the index->word mapping loaded from the input json."""
        return self.ix_to_word

    def __init__(self, opt):
        """Load metadata (json) and caption labels (h5); build per-split state.

        ``opt`` must provide: batch_size, seq_per_img, input_json,
        input_rel_box_dir, input_label_h5, train_only — plus input_att_dir
        and input_fc_dir, which are read later in ``__getitem__``.
        """
        self.opt = opt
        self.batch_size = self.opt.batch_size
        self.seq_per_img = self.opt.seq_per_img  # captions sampled per image
        # Train-time augmentation: random 224x224 crop + horizontal flip,
        # then ImageNet mean/std normalization.
        self.transform = transforms.Compose([
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406),
                                 (0.229, 0.224, 0.225))])
        print('Dataloader loding json file:', opt.input_json)
        self.info = json.load(open(self.opt.input_json))
        self.rel_bboxes_dir = self.opt.input_rel_box_dir
        self.ix_to_word = self.info['ix_to_word']
        self.vocab_size = len(self.ix_to_word)
        print('vocab size is:', self.vocab_size)
        print('DataLoader loading h5 file:', self.opt.input_label_h5)
        # driver='core' maps the whole label file into memory up front.
        self.h5_label_file = h5py.File(self.opt.input_label_h5, 'r', driver='core')
        seq_size = self.h5_label_file['labels'].shape  # (616767 * 16)
        # seq_info = self.h5_label_file['labels']
        self.seq_length = seq_size[1]
        print('max sequence length in data is', self.seq_length)
        # Per-image [start, end] caption ranges into 'labels'; 1-indexed in
        # the h5 file (see get_captions, which subtracts 1).
        self.label_start_ix = self.h5_label_file['label_start_ix'][:]
        self.label_end_ix = self.h5_label_file['label_end_ix'][:]
        self.num_images = self.label_start_ix.shape[0]
        print('read %d image features' % self.num_images)

        # separate out indexes for each provided splits
        self.split_ix = {'train': [], 'val': [], 'test': []}
        for ix in range(len(self.info['images'])):
            img = self.info['images'][ix]
            if img['split'] == 'train':
                self.split_ix['train'].append(ix)
            elif img['split'] == 'val':
                self.split_ix['val'].append(ix)
            elif img['split'] == 'test':
                self.split_ix['test'].append(ix)
            elif opt.train_only == 0:
                # Any other split label (e.g. 'restval') folds into train
                # unless the user requested train-only data.
                self.split_ix['train'].append(ix)
        print('assigned %d images to split train' % len(self.split_ix['train']))
        print('assigned %d images to split val' % len(self.split_ix['val']))
        print('assigned %d images to split test' % len(self.split_ix['test']))

        # Per-split cursor into split_ix, advanced by the BlobFetchers.
        self.iterators = {'train': 0, 'val': 0, 'test': 0}
        self._prefetch_process = {}  # The three prefetch process
        for split in self.iterators.keys():
            # Only the train split reshuffles its index order on wrap-around.
            self._prefetch_process[split] = BlobFetcher(split, self, split == 'train')

        # NOTE: this closure keeps `self` alive until interpreter exit.
        def cleanup():
            print('Terminating BlobFetcher')
            for split in self.iterators.keys():
                del self._prefetch_process[split]
        import atexit
        atexit.register(cleanup)

    def get_batch(self, split, batch_size=None, seq_per_img=None):
        """Assemble one minibatch dict for ``split``.

        Each image is replicated ``seq_per_img`` times so every sampled
        caption has a matching feature row. Returns a dict with keys:
        'img', 'fc_feats', 'att_feats', 'att_masks', 'boxes', 'labels',
        'masks', 'gts', 'bounds', 'infos'.
        """
        batch_size = batch_size or self.batch_size
        seq_per_img = seq_per_img or self.seq_per_img

        img_batch = []
        fc_batch = []
        att_batch = []
        # seq_length + 2 columns: column 0 and the last column stay zero —
        # presumably BOS/EOS padding slots; captions fill columns 1..seq_length.
        label_batch = np.zeros([batch_size * seq_per_img, self.seq_length + 2], dtype='int')
        mask_batch = np.zeros([batch_size * seq_per_img, self.seq_length + 2], dtype='float32')

        wrapped = False

        infos = []
        gts = []

        boxes_batch = []
        for i in range(batch_size):
            # Pull one sample (and its wrap flag) from the split's prefetcher.
            img, tmp_fc, tmp_att, tmp_box_coords, ix, tmp_wrapped = self._prefetch_process[split].get()
            img_batch.append(img)
            att_batch.append(tmp_att)
            fc_batch.append(tmp_fc)
            boxes_batch.append(tmp_box_coords)

            label_batch[i * seq_per_img: (i + 1) * seq_per_img, 1: self.seq_length + 1] = self.get_captions(ix, seq_per_img)

            if tmp_wrapped:
                wrapped = True

            # All ground-truth captions for this image (label ranges are
            # 1-indexed in the h5 file, hence the -1 on start only).
            gts.append(self.h5_label_file['labels'][self.label_start_ix[ix] - 1: self.label_end_ix[ix]])

            info_dict = {}
            info_dict['ix'] = ix
            info_dict['id'] = self.info['images'][ix]['id']
            info_dict['file_path'] = self.info['images'][ix]['file_path']
            infos.append(info_dict)

        data = {}
        # NOTE(review): the sort key is the constant 0, so this stable sort
        # preserves the original order — effectively a pass-through that only
        # converts the lists to tuples. Presumably a leftover from sorting by
        # caption/feature length; confirm before removing.
        img, boxes_batch, fc_batch, att_batch, label_batch, gts, infos = \
            zip(*sorted(zip(img_batch, boxes_batch, fc_batch, att_batch, np.vsplit(label_batch, batch_size), gts, infos),
                        key=lambda x: 0, reverse=True))

        # Repeat each image's features seq_per_img times, row-aligned with labels.
        data['fc_feats'] = np.stack(reduce(lambda x, y: x + y, [[_] * seq_per_img for _ in fc_batch]))
        data['img'] = np.stack(reduce(lambda x, y: x + y, [[_] * seq_per_img for _ in img_batch]))
        # a = data['img']
        # merge att_feats
        # Pad attention features to the longest box count in the batch.
        max_att_len = max([_.shape[0] for _ in att_batch])
        data['att_feats'] = np.zeros([len(att_batch) * seq_per_img, max_att_len, att_batch[0].shape[1]],
                                     dtype='float32')

        # data['img'] = np.zeros([len(img) * seq_per_img, img_batch[0].shape[0], img_batch[0].shape[1], img_batch[0].shape[2]], dtype='float32')

        # for i in range(len(img_batch)):
        #     data['img'][i * seq_per_img:(i + 1) * seq_per_img, :img_batch[i].shape[0], :img_batch[i].shape[1], :img_batch[i].shape[2]] = img_batch[i]

        for i in range(len(att_batch)):
            data['att_feats'][i * seq_per_img:(i + 1) * seq_per_img, :att_batch[i].shape[0]] = att_batch[i]

        data['att_masks'] = np.zeros(data['att_feats'].shape[:2], dtype='float32')
        for i in range(len(att_batch)):
            data['att_masks'][i * seq_per_img:(i + 1) * seq_per_img, :att_batch[i].shape[0]] = 1
        # set att_masks to None if attention features have same length
        if data['att_masks'].sum() == data['att_masks'].size:
            data['att_masks'] = None

        data['labels'] = np.vstack(label_batch)
        # generate mask
        # +2 covers the leading and trailing padding columns of each row.
        nonzeros = np.array(list(map(lambda x: (x != 0).sum() + 2, data['labels'])))
        for ix, row in enumerate(mask_batch):
            row[:nonzeros[ix]] = 1
        data['masks'] = mask_batch

        data['gts'] = gts  # all ground truth captions of each images
        data['bounds'] = {'it_pos_now': self.iterators[split], 'it_max': len(self.split_ix[split]), 'wrapped': wrapped}
        data['infos'] = infos

        # Zero-pad box coordinates to the same max_att_len as the att features.
        data['boxes'] = np.zeros([len(boxes_batch) * seq_per_img, max_att_len, boxes_batch[0].shape[1]], dtype='float32')
        for i in range(len(boxes_batch)):
            data['boxes'][i * seq_per_img:(i + 1) * seq_per_img, :boxes_batch[i].shape[0]] = boxes_batch[i]

        return data

    def __getitem__(self, index):
        """this function returns a tuple that is further passed to collate_fn"""
        ix = index
        # print(self.opt.input_att_dir, str(self.info['images'][ix]['id']))
        # Per-image attention features, flattened to (num_boxes, feat_dim).
        att_feat = np.load(os.path.join(self.opt.input_att_dir, str(self.info['images'][ix]['id']) + '.npz'))['feat']
        att_feat = att_feat.reshape(-1, att_feat.shape[-1])
        file_path = self.info['images'][ix]['file_path']
        # NOTE(review): hard-coded machine-specific dataset root — should come
        # from opt instead of being baked in here.
        root_path = '/mnt/hdd0/home/fyc/COCO_dataset/resized'
        img = Image.open(os.path.join(root_path, str(file_path))).convert('RGB')
        img_path = root_path + str(file_path)  # NOTE(review): unused local
        # img = imread(os.path.join(root_path, str(file_path)), mode='RGB')
        if self.transform is not None:
            img = self.transform(img)
        box_file = os.path.join(self.rel_bboxes_dir, str(self.info['images'][ix]['id']) + '.npy')
        box_coords = np.load(box_file)
        areas = np.expand_dims(utils.get_box_areas(box_coords), axis=1)

        # NOTE(review): box_coords_with_area is computed but never used — the
        # return below sends the raw box_coords. Confirm whether the
        # area-augmented boxes were meant to be returned instead.
        box_coords_with_area = np.concatenate([box_coords, areas], axis=-1)
        return (img,
                np.load(os.path.join(self.opt.input_fc_dir, str(self.info['images'][ix]['id']) + '.npy')),
                att_feat,
                box_coords,
                ix)

    def get_captions(self, ix, seq_per_img):
        """Sample ``seq_per_img`` caption rows (shape [seq_per_img, seq_length])
        for image ``ix``, with replacement if the image has fewer captions."""
        # fetch the sequence labels
        ix1 = self.label_start_ix[ix] - 1  # label_start_ix starts from 1
        ix2 = self.label_end_ix[ix] - 1
        ncap = ix2 - ix1 + 1  # number of captions available for this image
        assert ncap > 0, 'an image does not have any label. this can be handled but right now isn\'t'

        if ncap < seq_per_img:
            # we need to subsample (with replacement)
            seq = np.zeros([seq_per_img, self.seq_length], dtype='int')
            for q in range(seq_per_img):
                ixl = random.randint(ix1, ix2)
                seq[q, :] = self.h5_label_file['labels'][ixl, :self.seq_length]
        else:
            # Enough captions: take a random contiguous run of seq_per_img rows.
            ixl = random.randint(ix1, ix2 - seq_per_img + 1)
            seq = self.h5_label_file['labels'][ixl: ixl + seq_per_img, :self.seq_length]

        return seq


class SubsetSampler(torch.utils.data.sampler.Sampler):
    """Sampler that yields a caller-supplied sequence of indices, in order.

    Unlike SubsetRandomSampler, no shuffling is applied: the indices come
    out exactly as given.
    """

    def __init__(self, indices):
        # Hold a reference to the index sequence; it is not copied.
        self.indices = indices

    def __iter__(self):
        # Delegate straight to the sequence's own iterator (same order).
        return iter(self.indices)

    def __len__(self):
        return len(self.indices)


class BlobFetcher():
    """Experimental class for prefetching blobs in a separate process.

    Wraps a torch DataLoader over the parent dataset, restricted to one
    split, and advances the parent's ``iterators[split]`` cursor as samples
    are consumed. On wrap-around the underlying loader is rebuilt (and the
    split's index order reshuffled when ``if_shuffle`` is set).
    """

    def __init__(self, split, dataloader, if_shuffle=False):
        """
        split: 'train' / 'val' / 'test' — which split of the parent to serve.
        dataloader: the owning dataset; this fetcher reads and mutates
            dataloader.split_ix[split] and dataloader.iterators[split].
        if_shuffle: reshuffle the split's index list each time it wraps.
        """
        self.split = split
        self.dataloader = dataloader
        self.if_shuffle = if_shuffle

    def reset(self):
        # Rebuild the underlying loader so it resumes from the split's current
        # cursor position (first use, or right after a wrap-around).
        self.split_loader = iter(data.DataLoader(dataset=self.dataloader,
                                                 batch_size=1,
                                                 sampler=SubsetSampler(self.dataloader.split_ix[self.split][self.dataloader.iterators[self.split]:]),
                                                 shuffle=False,
                                                 pin_memory=True,
                                                 num_workers=0,
                                                 collate_fn=lambda x: x[0]))

    def _get_next_minibatch_inds(self):
        """Return (ix, wrapped): the next dataset index for this split and
        whether the cursor wrapped back to the start of the split."""
        max_index = len(self.dataloader.split_ix[self.split])
        wrapped = False

        ri = self.dataloader.iterators[self.split]
        ix = self.dataloader.split_ix[self.split][ri]

        ri_next = ri + 1
        if ri_next >= max_index:
            ri_next = 0
            # Optionally re-randomize the epoch order once exhausted.
            if self.if_shuffle:
                random.shuffle(self.dataloader.split_ix[self.split])
            wrapped = True
        self.dataloader.iterators[self.split] = ri_next

        return ix, wrapped

    def get(self):
        """Fetch the next sample tuple from the split, with the wrap flag
        appended as the last element."""
        if not hasattr(self, 'split_loader'):
            self.reset()

        ix, wrapped = self._get_next_minibatch_inds()
        # BUG FIX: `.next()` is the Python 2 iterator protocol; Python 3
        # requires the builtin next().
        tmp = self.split_loader
        tmp = next(tmp)
        if wrapped:
            self.reset()

        # Sanity check: the sample's trailing index (set by __getitem__) must
        # match the bookkeeping index we just advanced past.
        assert tmp[-1] == ix, "ix not equal"

        # BUG FIX: __getitem__ returns a tuple (passed through unchanged by
        # collate_fn), and tuple + list raises TypeError; append the wrap
        # flag with tuple concatenation instead. Callers unpack positionally,
        # so this stays interface-compatible.
        return tmp + (wrapped,)
