import numpy as np
import json
import pickle
import torch
import math
import h5py
from torch.utils.data import DataLoader
from .msvd_dataset import MSVDDataset
from .util import invert_dict
import logging


def load_vocab(path):
    """
    Load the vocabulary dict from a JSON file and add inverse mappings.

    :param path: path to the vocab JSON file
    :return: vocab dict, augmented with
        question_idx_to_token: index -> question token
        answer_idx_to_token: index -> answer token
        question_answer_idx_to_token: index -> question+answer token
    """
    with open(path, 'r') as f:
        vocab = json.load(f)
    # Each *_token_to_idx map (e.g. '<NULL>': 0, '<UNK>': 1) is inverted so
    # tokens can be looked up by index.
    for prefix in ('question', 'answer', 'question_answer'):
        vocab[prefix + '_idx_to_token'] = invert_dict(vocab[prefix + '_token_to_idx'])
    return vocab


class MSVDDataLoader(DataLoader):
    def __init__(self, **kwargs):
        """
        Build the MSVD-QA dataset and wrap it in a torch DataLoader.

        :param kwargs: must contain
            batch_size: batch size (left in kwargs so DataLoader also sees it)
            vocab_json: vocab json path, default data/MSVD-QA/msvd-qa_vocab.json
            question_pt: question pickle path, default data/MSVD-QA/msvd-qa_train_questions.pt
            annotation_file: annotation file with question instances, default data/MSVD-QA/train_qa.json
            video_name_mapping: video name mapping file, default data/MSVD-QA/youtube_mapping.txt
            video_dir: video directory, default data/MSVD-QA/video
            num_clips: number of clips a video is split into, default 24
            num_frames_per_clip: frames per clip, default 16
            image_height: frame height, default 224
            image_width: frame width, default 224
            is_video_preprocessed: whether video features are precomputed
        Any remaining kwargs are forwarded to torch.utils.data.DataLoader.
        """
        # Load msvd-qa_vocab.json into a dict (with inverse index->token maps).
        vocab_json_path = str(kwargs.pop('vocab_json'))
        logging.info('loading vocab from %s' % vocab_json_path)
        vocab = load_vocab(vocab_json_path)
        question_category = None

        # Load the preprocessed questions (msvd-qa_train_questions.pt).
        # NOTE(review): pickle.load on an untrusted file is unsafe — the .pt
        # file must come from the project's own preprocessing step.
        question_pt_path = str(kwargs.pop('question_pt'))
        logging.info('loading questions from %s' % question_pt_path)
        with open(question_pt_path, 'rb') as f:
            obj = pickle.load(f)
            if 'question_category' in obj:
                logging.info('loading question_category from %s' % question_pt_path)
                question_category = obj['question_category']
            else:
                logging.info('no question_category in %s' % question_pt_path)
            questions = obj['questions']
            questions_len = obj['questions_len']
            video_ids = obj['video_ids']
            q_ids = obj['question_id']
            answers = obj['answers']  # answer ids in the answer vocab
            glove_matrix = obj['glove']

        annotation_file = str(kwargs.pop('annotation_file'))  # data/MSVD-QA/train_qa.json
        video_name_mapping = str(kwargs.pop('video_name_mapping'))  # data/MSVD-QA/youtube_mapping.txt
        video_dir = str(kwargs.pop('video_dir'))  # data/MSVD-QA/video (was misspelled 'vedio_dir')
        num_clips = kwargs.pop('num_clips')
        num_frames_per_clip = kwargs.pop('num_frames_per_clip')
        image_height = kwargs.pop('image_height')
        image_width = kwargs.pop('image_width')
        is_video_preprocessed = kwargs.pop('is_video_preprocessed')

        # Build the dataset. question_category is usually absent (None); pass
        # it only when present so MSVDDataset's default applies otherwise.
        dataset_kwargs = {}
        if question_category is None:
            logging.info('question_category is None')
        else:
            logging.info('question_category is not None')
            dataset_kwargs['question_category'] = question_category
        self.dataset = MSVDDataset(annotation_file, video_name_mapping, video_dir,
                                   num_clips, num_frames_per_clip, image_height, image_width,
                                   answers, questions, questions_len, video_ids, q_ids,
                                   is_video_preprocessed, **dataset_kwargs)

        self.vocab = vocab
        # Read (not popped) on purpose: DataLoader itself also needs batch_size.
        self.batch_size = kwargs['batch_size']
        self.glove_matrix = glove_matrix  # GloVe embeddings aligned with question token ids

        super().__init__(self.dataset, **kwargs)

    def __len__(self):
        """Number of batches per epoch (last partial batch counts)."""
        # BUG FIX: this was previously nested inside __init__, making it dead
        # local code; DataLoader's default __len__ was silently used instead.
        # Defined at class level so it actually overrides DataLoader.__len__.
        return math.ceil(len(self.dataset) / self.batch_size)