import argparse
import os
import json
import numpy as np
import cv2
import logging
import torch
from PIL import Image
import h5py

def load_video_paths(annotation_file, video_name_mapping, video_dir):
    """
    Build a list of (video_path, video_id) tuples for every video referenced
    in the QA annotation file.

    e.g. [('/root/autodl-tmp/MSVD-QA/video/00jrXRMlZOY_0_10.avi', 47)]

    :param annotation_file: path of the annotation json (e.g. MSVD-QA/train_qa.json),
        a list of dicts like {"answer": "animal", "id": 0, "question": "...", "video_id": 1}
    :param video_name_mapping: path of the name-mapping txt, one pair per line,
        e.g. "00jrXRMlZOY_0_10 vid47"
    :param video_dir: directory holding the .avi files
    :return: video_paths: list of (video_path, video_id) tuples
    :author: njupt-mcc(github), qms
    """
    with open(annotation_file, 'r') as anno_file:
        instances = json.load(anno_file)
    # Deduplicate: several questions typically refer to the same video.
    video_ids = {instance['video_id'] for instance in instances}

    mapping_dict = {}
    with open(video_name_mapping, 'r') as mapping:
        for line in mapping:
            line = line.strip()
            if not line:
                # Skip blank lines — a trailing newline at end of file previously
                # produced an empty entry and an IndexError below.
                continue
            parts = line.split(' ')
            mapping_dict[parts[1]] = parts[0]  # e.g. 'vid47' -> '00jrXRMlZOY_0_10'

    video_paths = []
    for video_id in video_ids:
        # e.g. video_dir + '/00jrXRMlZOY_0_10.avi', paired with id 47
        video_paths.append(
            (video_dir + '/{}.avi'.format(mapping_dict['vid' + str(video_id)]), video_id)
        )
    return video_paths


def load_video_id_to_video_path(video_paths):
    """
    Convert the (video_path, video_id) tuple list into a dict keyed by video_id.

    :param video_paths: list of (video_path, video_id) tuples
    :return: video_id_to_video_path: mapping from video_id to video_path
    :author: qms
    """
    return {vid: vpath for vpath, vid in video_paths}


def extract_clips_with_consecutive_frames(path, num_clips, num_frames_per_clip, image_height, image_width, model):
    """
    Extract `num_clips` clips of `num_frames_per_clip` consecutive frames from a video.

    :param path: video file path
    :param num_clips: number of clips to extract
    :param num_frames_per_clip: number of frames per clip
    :param image_height: target frame height in pixels
    :param image_width: target frame width in pixels
    :param model: 'resnet101' or 'resnext101'; resnext101 uses a transposed layout
    :return: clips tensor — with the defaults:
        resnet101:  torch.Size([num_clips, num_frames_per_clip, 3, H, W]), ImageNet-normalized
        resnext101: torch.Size([num_clips, 3, num_frames_per_clip, H, W]), raw 0-255 values
    :author: njupt-mcc(github), qms
    """
    cap = cv2.VideoCapture(path)
    assert cap.isOpened(), 'Cannot capture source {}'.format(path)
    video_data = []
    rval, frame = cap.read()
    while rval:
        # OpenCV decodes BGR; convert to RGB. (The old cv2.waitKey(1) GUI wait
        # served no purpose in a headless decode loop and was removed.)
        video_data.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        rval, frame = cap.read()
    cap.release()

    total_frames = len(video_data)
    clips = []

    # Pick num_clips evenly spaced center frames, excluding the two endpoints,
    # e.g. np.linspace(0, 40, 6)[1:5] -> [8, 16, 24, 32].
    for center in np.linspace(0, total_frames, num_clips + 2, dtype=np.int32)[1:num_clips + 1]:
        clip_start = int(center) - num_frames_per_clip // 2
        clip_end = int(center) + num_frames_per_clip // 2
        if clip_start < 0:
            clip_start = 0
        if clip_end > total_frames:
            clip_end = total_frames - 1
        clip = video_data[clip_start:clip_end]
        if clip_start == 0:
            # Clip hits the start of the video: pad by repeating the first frame.
            shortage = num_frames_per_clip - (clip_end - clip_start)
            if shortage > 0:
                pad = np.stack([video_data[clip_start]] * shortage, axis=0)
                clip = np.concatenate((pad, clip), axis=0)
        if clip_end == (total_frames - 1):
            # Clip hits the end of the video: pad by repeating the last frame.
            shortage = num_frames_per_clip - (clip_end - clip_start)
            if shortage > 0:
                pad = np.stack([video_data[clip_end]] * shortage, axis=0)
                clip = np.concatenate((clip, pad), axis=0)

        new_clip = []
        for j in range(num_frames_per_clip):
            img = Image.fromarray(clip[j])
            # PIL's resize takes (width, height); the old code passed
            # (height, width), which only worked because the defaults are square.
            img = img.resize((image_width, image_height), Image.BICUBIC)
            # HWC -> CHW
            new_clip.append(np.transpose(np.array(img), axes=(2, 0, 1)))
        new_clip = np.asarray(new_clip)  # (num_frames, 3, H, W)
        if model == 'resnext101':
            # (num_frames, 3, H, W) -> (3, num_frames, H, W)
            new_clip = np.transpose(np.squeeze(new_clip), axes=(1, 0, 2, 3))
        clips.append(new_clip)

    if model == 'resnet101':
        # ImageNet mean/std (std[2] fixed: 0.225, was mistyped as 0.224).
        # The old per-clip loop rebound its loop variable, so normalization was
        # silently discarded; normalize the whole batch in one broadcast instead.
        mean = np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(1, 3, 1, 1)
        std = np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(1, 3, 1, 1)
        clips = np.asarray(clips, dtype=np.float32)
        clips = (clips / 255.0 - mean) / std
        clips = torch.FloatTensor(clips)  # shape: (num_clips, num_frames, 3, H, W)
    elif model == 'resnext101':
        clips = torch.FloatTensor(np.asarray(clips))  # (num_clips, 3, num_frames, H, W)

    return clips


if __name__ == '__main__':
    """
    Pre-process videos: split each video into clips of frames and store them in an h5 file.
    For resnet101 the file holds two datasets, 'video_ids' and 'resnet_features'; the stored
    features are numpy arrays and must be converted to a cuda tensor by the consumer.
    :author: qms
    """
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)-8s %(message)s')

    # Command-line arguments.
    parser = argparse.ArgumentParser()
    parser.add_argument('--annotation_file', type=str, default='data/msvd-qa/train_qa.json')
    parser.add_argument('--num_clips', type=int, default=8)
    parser.add_argument('--num_frames_per_clip', type=int, default=16)
    parser.add_argument('--image_height', type=int, default=224)
    parser.add_argument('--image_width', type=int, default=224)
    parser.add_argument('--video_dir', type=str, default='data/msvd-qa/video')
    parser.add_argument('--video_name_mapping', type=str, default='data/msvd-qa/youtube_mapping.txt')
    parser.add_argument('--model', type=str, default='resnet101')
    parser.add_argument('--output_dir', type=str, default='data/msvd-qa/frames')
    parser.add_argument('--seed', type=int, default=666)
    opt = parser.parse_args()

    # Build the output file name and validate the input paths.
    output_dir = opt.output_dir
    output_filename = 'frames_{}_{}_{}_{}_{}_{}.h5'.format(
        opt.annotation_file.split('/')[-1].split('.')[0],
        opt.num_clips,
        opt.num_frames_per_clip,
        opt.image_height,
        opt.image_width,
        opt.model
    )
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    output_filename = os.path.join(output_dir, output_filename)
    assert os.path.exists(opt.annotation_file), 'annotation_file not exists'
    assert opt.model in ['resnet101', 'resnext101'], 'model must be resnet101 or resnext101'
    assert os.path.exists(opt.video_dir), 'video_path not exists'

    video_paths = load_video_paths(opt.annotation_file, opt.video_name_mapping, opt.video_dir)
    video_id_to_video_path = load_video_id_to_video_path(video_paths)
    num_videos = len(video_id_to_video_path)

    logging.info('writing clips into file {}'.format(output_filename))
    with h5py.File(output_filename, 'w') as fd:
        # Create both datasets once, up front (the old code duplicated this
        # lazily inside each model branch of the loop). The feature layout
        # depends on the backbone — see extract_clips_with_consecutive_frames.
        if opt.model == 'resnet101':
            feat_name = 'resnet_features'
            feat_shape = (num_videos, opt.num_clips, opt.num_frames_per_clip,
                          3, opt.image_height, opt.image_width)
        else:
            feat_name = 'resnext_features'
            feat_shape = (num_videos, opt.num_clips, 3,
                          opt.num_frames_per_clip, opt.image_height, opt.image_width)
        feat_dset = fd.create_dataset(feat_name, feat_shape, dtype=np.float32)
        video_ids_dset = fd.create_dataset('video_ids', (num_videos,), dtype=int)

        for i, (video_id, path) in enumerate(video_id_to_video_path.items()):
            assert os.path.exists(path), 'video {} not exists'.format(path)
            logging.info('processing video {}, id {}'.format(path, video_id))
            clips = extract_clips_with_consecutive_frames(path, opt.num_clips, opt.num_frames_per_clip,
                                                          opt.image_height, opt.image_width, opt.model)
            video_ids_dset[i] = video_id
            feat_dset[i] = np.asarray(clips)