from torch.utils.data import Dataset
import json
import logging
import cv2
import numpy as np
from PIL import Image
import torch
import os
import h5py


# Maps a question-category name to an integer class label.  Used by
# MSVDDataset.__getitem__ when a per-question `question_category` list is
# supplied to the dataset; otherwise unused.
# NOTE(review): the actiontype/actiondir categories look SVQA/CLEVRER-style
# rather than MSVD-QA — confirm against the annotation source.
QUESTION_CATEGORY = {'count': 0, 'exist': 1, 'query_color': 2, 'query_size': 3, 'query_actiontype': 4,
                     'query_actiondir': 5, 'query_shape': 6, 'greater_than': 7, 'equal_to': 8,
                     'less_than': 9, 'equal_color': 10, 'equal_size': 11, 'equal_actiontype': 12, 'equal_actiondir': 13,
                     'equal_shape': 14}


def load_video_paths(annotation_file, video_name_mapping, video_dir):
    """
    Collect the video file path for every video id referenced by the annotations.

    Returns a list of (video_path, video_id) tuples, e.g.
    [('/root/autodl-tmp/MSVD-QA/video/00jrXRMlZOY_0_10.avi', 47)].

    :param annotation_file: path of the annotation json (e.g. MSVD-QA/train_qa.json);
        each instance looks like
        {"answer": "animal", "id": 0, "question": "...", "video_id": 1}
    :param video_name_mapping: path of the name-mapping file; each line is
        "<clip_name> vid<id>", e.g. "00jrXRMlZOY_0_10 vid47"
    :param video_dir: directory holding the .avi files
    :return: video_paths: list of (video_path, video_id) tuples
    :author: njupt-mcc(github), qms
    """
    with open(annotation_file, 'r') as anno_file:
        instances = json.load(anno_file)
    # A video usually has several questions; keep each video id only once.
    video_ids = {instance['video_id'] for instance in instances}

    mapping_dict = {}
    with open(video_name_mapping, 'r') as mapping:
        for line in mapping:
            line = line.strip()
            if not line:
                # The mapping file commonly ends with a trailing newline;
                # the old code crashed (IndexError) on that blank line.
                continue
            parts = line.split(' ')
            mapping_dict[parts[1]] = parts[0]  # e.g. 'vid47' -> '00jrXRMlZOY_0_10'

    return [
        (os.path.join(video_dir, '{}.avi'.format(mapping_dict['vid' + str(video_id)])), video_id)
        for video_id in video_ids
    ]


def load_video_id_to_video_path(video_paths):
    """
    Invert the (path, id) tuples into a lookup dict keyed by video id.

    :param video_paths: list of (video_path, video_id) tuples
    :return: dict mapping video_id -> video_path
    :author: qms
    """
    return {vid: path for path, vid in video_paths}


def extract_clips_with_consecutive_frames(path, num_clips, num_frames_per_clip, image_height, image_width, model):
    """
    Extract `num_clips` clips of consecutive frames from a video.

    :param path: video file path
    :param num_clips: number of clips to extract
    :param num_frames_per_clip: number of frames in each clip
    :param image_height: target frame height
    :param image_width: target frame width
    :param model: 'resnet101' or 'resnext101'; selects the output layout
        (and, for resnet101, ImageNet normalization)
    :return: clips as a CUDA float tensor:
        resnet101:  (num_clips, num_frames_per_clip, 3, H, W)
        resnext101: (num_clips, 3, num_frames_per_clip, H, W)
    :author: njupt-mcc(github), qms
    """
    clips = list()
    video_data = []
    if model == 'resnet101':
        logging.info('extracting clips from {}'.format(path))
    cap = cv2.VideoCapture(path)
    assert cap.isOpened(), 'Cannot capture source {}'.format(path)
    rval, frame = cap.read()
    while rval:
        # OpenCV decodes BGR; convert to RGB for PIL / ImageNet-trained models.
        # (cv2.waitKey removed — it is a GUI call, useless without imshow.)
        video_data.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        rval, frame = cap.read()
    cap.release()
    total_frames = len(video_data)
    # PIL's Image.resize expects (width, height); the old code passed
    # (height, width), which only happened to work for square sizes.
    img_size = (image_width, image_height)

    # Pick num_clips evenly spaced clip centers, excluding the two endpoints:
    # e.g. np.linspace(0, 40, 5) = [0 10 20 30 40], keep [10 20 30].
    half = int(num_frames_per_clip / 2)
    for center in np.linspace(0, total_frames, num_clips + 2, dtype=np.int32)[1:num_clips + 1]:
        clip_start = int(center) - half
        clip_end = int(center) + half
        if clip_start < 0:
            clip_start = 0
        if clip_end > total_frames:
            clip_end = total_frames - 1
        clip = video_data[clip_start:clip_end]
        if clip_start == 0:  # pad at the front by repeating the first frame
            shortage = num_frames_per_clip - (clip_end - clip_start)
            added_frames = [np.expand_dims(video_data[clip_start], axis=0) for _ in range(shortage)]
            if added_frames:
                clip = np.concatenate((np.concatenate(added_frames, axis=0), clip), axis=0)
        if clip_end == (total_frames - 1):  # pad at the back by repeating the last frame
            shortage = num_frames_per_clip - (clip_end - clip_start)
            added_frames = [np.expand_dims(video_data[clip_end], axis=0) for _ in range(shortage)]
            if added_frames:
                clip = np.concatenate((clip, np.concatenate(added_frames, axis=0)), axis=0)
        new_clip = []
        for j in range(num_frames_per_clip):
            img = Image.fromarray(clip[j])
            img = img.resize(img_size, Image.BICUBIC)
            # HWC -> CHW
            new_clip.append(np.transpose(np.array(img), axes=(2, 0, 1)))
        new_clip = np.asarray(new_clip)  # (num_frames_per_clip, 3, H, W)
        if model == 'resnext101':
            # 3D CNN expects (channels, num_frames, H, W)
            new_clip = np.squeeze(new_clip)
            new_clip = np.transpose(new_clip, axes=(1, 0, 2, 3))
        clips.append(new_clip)

    # ImageNet normalization constants; the old code had 0.224 where the
    # standard std is 0.225.
    mean = np.array([0.485, 0.456, 0.406]).reshape(1, 3, 1, 1)
    std = np.array([0.229, 0.224, 0.225]).reshape(1, 3, 1, 1)

    if model == 'resnet101':
        # BUG FIX: the previous loop normalized into the discarded loop
        # variable (`for clip in clips: clip = ...`), so the model received
        # unnormalized uint8 data.  Build the normalized list explicitly.
        clips = [((c.astype(np.float32) / 255.0) - mean) / std for c in clips]
        clips = torch.FloatTensor(np.asarray(clips)).cuda()
        # shape: (num_clips, num_frames_per_clip, 3, H, W)
    elif model == 'resnext101':
        clips = torch.FloatTensor(np.asarray(clips)).cuda()
        # shape: (num_clips, 3, num_frames_per_clip, H, W)

    return clips


class MSVDDataset(Dataset):
    """MSVD-QA dataset: one item per question, paired with its video's clips."""

    def __init__(self, annotation_file, video_name_mapping, video_dir, num_clips, num_frames_per_clip, image_height, image_width,
                 answers, questions, questions_len, video_ids, q_ids, is_video_preprocessed,
                 transform=None, question_category=None):
        """
        :param annotation_file: annotation file with the question instances,
            default data/MSVD-QA/train_qa.json
        :param video_name_mapping: video-name-to-index mapping file,
            default data/MSVD-QA/youtube_mapping.txt
        :param video_dir: video directory, default data/MSVD-QA/video
        :param num_clips: number of clips extracted from each video
        :param num_frames_per_clip: number of frames per clip
        :param image_height: frame height
        :param image_width: frame width
        :param answers: encoded index of each answer
        :param questions: each element is the word-index list of one question
        :param questions_len: true (unpadded) length of each question
        :param video_ids: video id of each question instance
        :param q_ids: id of each question (its enumerate index)
        :param is_video_preprocessed: when True, features are loaded from
            pre-extracted h5 files instead of decoding videos on the fly
        :param transform: optional transform (stored; not applied in __getitem__)
        :param question_category: optional per-question category names, mapped
            through QUESTION_CATEGORY in __getitem__

        :author: MIT, qms
        """
        # Correct spelling added; the historical misspelled attribute is kept
        # as an alias so existing external code keeps working.
        self.annotation_file = self.annotaaion_file = annotation_file
        self.video_name_mapping = video_name_mapping
        self.video_dir = video_dir
        self.num_clips = num_clips
        self.num_frames_per_clip = num_frames_per_clip
        self.image_height = image_height
        self.image_width = image_width

        self.all_answers = answers
        self.question_category = question_category
        self.all_questions = torch.LongTensor(np.asarray(questions))
        self.all_questions_len = torch.LongTensor(np.asarray(questions_len))
        self.all_video_ids = torch.LongTensor(np.asarray(video_ids))
        self.all_q_ids = q_ids
        self.transform = transform

        self.video_paths = load_video_paths(annotation_file, video_name_mapping, video_dir)
        self.video_id_to_video_path = load_video_id_to_video_path(self.video_paths)
        self.is_video_preprocessed = is_video_preprocessed

        if is_video_preprocessed:
            self.app_feature_h5 = self._frames_h5_path(
                annotation_file, num_clips, num_frames_per_clip, image_height, image_width, 'resnet101')
            self.motion_feature_h5 = self._frames_h5_path(
                annotation_file, num_clips, num_frames_per_clip, image_height, image_width, 'resnext101')
            assert os.path.exists(self.app_feature_h5)
            assert os.path.exists(self.motion_feature_h5)
            self.app_feat_id_to_index = self._video_id_to_feat_index(self.app_feature_h5)
            self.motion_feat_id_to_index = self._video_id_to_feat_index(self.motion_feature_h5)

    @staticmethod
    def _frames_h5_path(annotation_file, num_clips, num_frames_per_clip, image_height, image_width, model):
        """Build the path of the pre-extracted feature h5 file for `model`
        ('resnet101' appearance features or 'resnext101' motion features)."""
        output_filename = 'frames_{}_{}_{}_{}_{}_{}.h5'.format(
            annotation_file.split('/')[-1].split('.')[0],
            num_clips,
            num_frames_per_clip,
            image_height,
            image_width,
            model
        )
        return os.path.join('data/msvd-qa/frames', output_filename)

    @staticmethod
    def _video_id_to_feat_index(feature_h5):
        """Map each video id (as str) to its row index in the feature file."""
        with h5py.File(feature_h5, 'r') as features_file:
            feat_video_ids = features_file['video_ids'][()]
        return {str(vid): i for i, vid in enumerate(feat_video_ids)}

    def __len__(self):
        """
        A video has several questions, so the dataset length is the number
        of questions, not the number of videos.
        :return: the number of questions
        :author: MIT
        """
        return len(self.all_questions)

    def __getitem__(self, index):
        """
        Return one question together with its video's clips (or features).

        :param index: question index
        :return video_idx: id of the video
        :return question_idx: id of the question
        :return question_category: integer category label (only when a
            question_category list was supplied to __init__)
        :return answer: encoded answer index
        :return appearance_clips: per-item shape (num_clips, num_frames_per_clip, 3, H, W)
        :return motion_clips: per-item shape (num_clips, 3, num_frames_per_clip, H, W)
        :return question: encoded question, torch.LongTensor
        :return question_len: true length of the question
        """
        answer = self.all_answers[index] if self.all_answers is not None else None
        question = self.all_questions[index]
        question_category = QUESTION_CATEGORY[
            self.question_category[index]
        ] if self.question_category is not None else None
        question_len = self.all_questions_len[index]
        video_idx = self.all_video_ids[index].item()
        question_idx = self.all_q_ids[index]
        video_path = self.video_id_to_video_path[video_idx]

        if not self.is_video_preprocessed:
            appearance_clips = extract_clips_with_consecutive_frames(
                video_path, self.num_clips, self.num_frames_per_clip, self.image_height, self.image_width, 'resnet101')
            motion_clips = extract_clips_with_consecutive_frames(
                video_path, self.num_clips, self.num_frames_per_clip, self.image_height, self.image_width, 'resnext101')
        else:
            # Fixed log message: the old one said "extracting clips" even
            # though this branch loads pre-extracted features from h5.
            logging.info('loading pre-extracted features for {}'.format(video_path))
            app_index = self.app_feat_id_to_index[str(video_idx)]
            motion_index = self.motion_feat_id_to_index[str(video_idx)]
            with h5py.File(self.app_feature_h5, 'r') as f_app:
                appearance_clips = f_app['resnet_features'][app_index]
            with h5py.File(self.motion_feature_h5, 'r') as f_motion:
                motion_clips = f_motion['resnext_features'][motion_index]

        if question_category is None:
            return (
                video_idx, question_idx, answer, appearance_clips, motion_clips, question, question_len
            )
        return (
            video_idx, question_idx, question_category, answer, appearance_clips, motion_clips, question, question_len
        )