import cPickle as pickle
import numpy as np
import os
from collections import OrderedDict
from utils import save_pickle


class DataEngine(object):
    """Load MSVD (youtube2text) captions/features and build model-ready splits.

    The engine reads the pickled dictionaries shipped with the
    youtube2text_iccv15 release from `data_dir`, optionally per-video
    ResNet-152 feature arrays (.npy) from `feat_dir`, and assembles
    fixed-size caption index vectors plus K-frame feature tensors.
    """

    def __init__(self,
                 num_frames=28,
                 data_dir='/home/sensetime/data/msvd/youtube2text_iccv15',
                 feat_dir='/home/sensetime/data/msvd/npy'):
        # K: number of frames sampled (or zero-padded to) per video clip.
        self.K = num_frames
        self.data_dir = data_dir
        self.feat_dir = feat_dir

    def clean_str(self, caption):
        """Strip punctuation, normalize whitespace, and lower-case a caption."""
        caption = caption.replace('.', '').replace(',', '').replace("'", "").replace('"', '')
        caption = caption.replace('&', 'and').replace('(', '').replace(")", "").replace('-', ' ')
        caption = " ".join(caption.split())  # collapse runs of whitespace
        return caption.lower()

    def _build_caption_vector(self, caption, word_to_idx, max_length=15):
        """Encode a cleaned caption into a fixed-size index vector.

        Layout: <START>, up to `max_length` in-vocabulary word indices
        (out-of-vocabulary words are silently dropped), <END>, then
        right-padding with <NULL> to exactly `max_length + 2` entries.

        Raises:
            ValueError: if the assembled vector is not `max_length + 2`
                long (should be impossible; kept as an internal sanity check).
        """
        words = caption.split(" ")[:max_length]  # caption contains only lower-case words
        cap_vec = [word_to_idx['<START>']]
        cap_vec.extend(word_to_idx[w] for w in words if w in word_to_idx)
        cap_vec.append(word_to_idx['<END>'])
        # Pad short captions with the special '<NULL>' token so every
        # caption vector has the same length.
        cap_vec.extend([word_to_idx['<NULL>']] * (max_length + 2 - len(cap_vec)))
        if len(cap_vec) != max_length + 2:
            raise ValueError('caption vector has length %d, expected %d'
                             % (len(cap_vec), max_length + 2))
        return np.asarray(cap_vec)

    def _load_pickle(self, filename):
        """Load a pickle from data_dir, closing the handle; binary mode is
        required for pickled data on Python 3 / Windows."""
        with open(os.path.join(self.data_dir, filename), 'rb') as f:
            return pickle.load(f)

    def wordidx(self):
        """Return (word_to_idx, idx_to_word) with special tokens injected.

        Every index from the on-disk dictionary is shifted up by one, then
        the special tokens take fixed slots 0/1/2.
        NOTE(review): this assumes the on-disk worddict reserves its lowest
        indices so the shifted words do not collide with slots 1 and 2 --
        confirm against worddict.pkl.
        """
        word_to_idx = self._load_pickle('worddict.pkl')
        for k in word_to_idx:  # values change, key set does not: safe to iterate
            word_to_idx[k] += 1
        word_to_idx['<NULL>'] = 0
        word_to_idx['<START>'] = 1
        word_to_idx['<END>'] = 2
        idx_to_word = OrderedDict((v, k) for k, v in word_to_idx.items())
        return word_to_idx, idx_to_word

    def movieID_vid(self):
        """Return (vid_to_movieID, movieID_to_vid) name mappings."""
        movieID_to_vid = self._load_pickle('dict_youtube_mapping.pkl')
        vid_to_movieID = OrderedDict((v, k) for k, v in movieID_to_vid.items())
        return vid_to_movieID, movieID_to_vid

    def movieID_caption(self):
        """Return the movieID -> list-of-caption-strings mapping."""
        return self._load_pickle('dict_movieID_caption.pkl')

    def feature(self):
        """Return the bundled vid -> feature-array mapping."""
        return self._load_pickle('FEAT_key_vidID_value_features.pkl')

    def get_data(self, split='all', which='resnet152'):
        """Build dataset dict(s) for the requested split.

        Args:
            split: 'train' | 'val' | 'test' returns one dict;
                'all' returns the tuple (train, valid, test).
            which: 'resnet152' loads per-video .npy files from feat_dir;
                'pretrained' uses the bundled feature pickle.
        """
        word_to_idx, _ = self.wordidx()
        feat = self.feature() if which == 'pretrained' else None
        cap = self.movieID_caption()
        vid_to_movieID, _ = self.movieID_vid()
        if split in ('train', 'val', 'test'):
            return self.get_split(word_to_idx, feat, cap, vid_to_movieID, split, which)
        elif split == 'all':
            train = self.get_split(word_to_idx, feat, cap, vid_to_movieID, 'train', which)
            valid = self.get_split(word_to_idx, feat, cap, vid_to_movieID, 'val', which)
            test = self.get_split(word_to_idx, feat, cap, vid_to_movieID, 'test', which)
            return train, valid, test
        else:
            raise NotImplementedError

    def get_split(self, word_to_idx, feat, cap, vid_to_movieID, split='train', which='resnet152'):
        """Assemble features, caption vectors, and video indices for one split.

        Uses the standard MSVD partition: videos 1-1200 train, 1201-1300
        val, 1301+ test. Also dumps the cleaned reference sentences (keyed
        by position within the split) to ./data/<split>/<split>.references.pkl.

        Returns a dict with:
            'features':  (num_videos, K, ...) array of frame features
            'captions':  (num_captions, max_length + 2) index vectors
            'image_idx': per-caption index into 'features'
        """
        num_videos = len(vid_to_movieID)
        vid_list = np.arange(num_videos)
        if split == 'train':
            vid_list = vid_list[:1200]
        elif split == 'val':
            vid_list = vid_list[1200:1300]
        elif split == 'test':
            vid_list = vid_list[1300:]
        else:
            raise NotImplementedError
        features = []
        captions = []
        image_idx = []
        references = {}
        for ref_id, i in enumerate(vid_list):
            vid = 'vid%d' % (i + 1)
            movieID = vid_to_movieID[vid]
            if which == 'resnet152':
                # np.load opens the file in binary mode itself; the old
                # open(path, 'r') leaked a text-mode handle.
                frames = np.load(os.path.join(self.feat_dir, '%s.npy' % vid))
                features.append(self.get_sub_frames(frames))
            elif which == 'pretrained':
                features.append(self.get_sub_frames(feat[vid]))
            else:
                raise NotImplementedError
            references[ref_id] = []
            for sentence in cap[movieID]:
                image_idx.append(i)
                sentence = self.clean_str(sentence)
                captions.append(self._build_caption_vector(sentence, word_to_idx))
                references[ref_id].append(sentence)
        save_pickle(references, './data/%s/%s.references.pkl' % (split, split))
        return {'features': np.array(features),
                'captions': np.array(captions),
                'image_idx': np.array(image_idx)}

    def get_sub_frames(self, frames):
        """Return exactly K frames: zero-pad short clips, subsample long ones."""
        if len(frames) < self.K:
            frames_ = self.pad_frames(frames, self.K)
        else:
            frames_ = self.extract_frames_equally_spaced(frames)
        return np.asarray(frames_)

    def pad_frames(self, frames, limit):
        """Right-pad `frames` with zero frames up to `limit` entries.

        Zero frames (same shape as a real frame) are compatible with both
        conv and fully connected layers downstream.
        """
        last_frame = frames[-1]
        # last_frame * 0. keeps the original float-promotion behavior.
        padding = np.asarray([last_frame * 0.] * (limit - len(frames)))
        return np.concatenate([frames, padding], axis=0)

    def extract_frames_equally_spaced(self, frames):
        """Chunk `frames` into K segments and take the first frame of each."""
        n_frames = len(frames)
        splits = np.array_split(range(n_frames), self.K)
        idx_taken = [s[0] for s in splits]
        return frames[idx_taken]


if __name__ == '__main__':
    # Smoke-test the pipeline: build all three splits and dump the
    # per-caption video index of the training set.
    engine = DataEngine()
    train, valid, test = engine.get_data()
    for idx in train['image_idx']:
        # print() call works on both Python 2 and 3 for a single argument
        # (the old `print idx` statement is a syntax error on Python 3).
        print(idx)
