import argparse
import os
import random

# from scipy.misc import imresize
import cv2
import h5py
import numpy as np
import torch
import torchvision
from PIL import Image
from torch import nn
from tqdm import tqdm

from datautils import msrvtt_qa
from datautils import msvd_qa
from datautils import svqa
from datautils import utils
from models import resnext


def build_resnet():
    """Build an ImageNet-pretrained torchvision ResNet, strip its final FC
    layer, move it to the GPU and switch it to eval mode.

    Returns:
        A ``torch.nn.Sequential`` that emits pooled convolutional features.
    """
    # Validate the requested architecture before instantiating anything.
    if not hasattr(torchvision.models, args.model):
        raise ValueError('Invalid model "%s"' % args.model)
    if 'resnet' not in args.model:
        raise ValueError('Feature extraction only supports ResNets')
    backbone = getattr(torchvision.models, args.model)(pretrained=True)
    # Drop the classification head (last child) so only features come out.
    feature_layers = list(backbone.children())[:-1]
    model = torch.nn.Sequential(*feature_layers)
    model.cuda()
    model.eval()  # use running BN stats, disable dropout
    return model


def build_resnext():
    """Build a Kinetics-pretrained 3D ResNeXt-101 for motion-feature
    extraction, wrapped in DataParallel and set to eval mode.

    Returns:
        The loaded model (``nn.DataParallel``) in eval mode.
    Raises:
        FileNotFoundError: if the pretrained checkpoint is missing.
    """
    model = resnext.resnet101(num_classes=400, shortcut_type='B', cardinality=32,
                              sample_size=112, sample_duration=16,
                              last_fc=False)  # last_fc=False drops the classification head
    model = model.cuda()  # move to GPU before wrapping in DataParallel
    model = nn.DataParallel(model, device_ids=None)  # None -> use all visible GPUs
    ckpt_path = './data/preprocess/pretrained/resnext-101-kinetics.pth'
    # BUGFIX: was `assert os.path.exists(...)`, which is silently stripped
    # under `python -O`; raise explicitly so a missing checkpoint always fails.
    if not os.path.exists(ckpt_path):
        raise FileNotFoundError(ckpt_path)
    # Load onto CPU first; load_state_dict then copies into the GPU parameters.
    model_data = torch.load(ckpt_path, map_location='cpu')
    model.load_state_dict(model_data['state_dict'])
    model.eval()  # use running BN stats, disable dropout
    return model


def run_batch(cur_batch, model):
    """Normalize one clip (a batch of frames) and run it through the model.

    Args:
        cur_batch: numpy array of frames, shape (num_frames, 3, H, W), with
            uint8-style pixel values in [0, 255].
        model: feature-extraction network (e.g. truncated ResNet).
    Returns:
        Extracted features as a numpy array.
    """
    # ImageNet channel statistics, shaped to broadcast over (N, C, H, W).
    mean = np.array([0.485, 0.456, 0.406]).reshape(1, 3, 1, 1)
    # BUGFIX: the last std value was 0.224; the correct ImageNet std is
    # [0.229, 0.224, 0.225].
    std = np.array([0.229, 0.224, 0.225]).reshape(1, 3, 1, 1)

    image_batch = cur_batch.astype(np.float32)
    image_batch = (image_batch / 255.0 - mean) / std  # scale to [0, 1], then standardize

    # Fall back to CPU when CUDA is unavailable (was a hard-coded .cuda()).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    image_batch = torch.FloatTensor(image_batch).to(device)

    # BUGFIX: the original only wrapped the (deprecated) Variable creation in
    # no_grad; the forward pass itself still tracked gradients, wasting memory.
    with torch.no_grad():
        feats = model(image_batch)

    return feats.detach().cpu().numpy()


def extract_clips_with_consecutive_frames(path, num_clips, num_frames_per_clip):
    """Split a video into `num_clips` clips of `num_frames_per_clip`
    consecutive frames each, centered at evenly spaced interior positions.

    Args:
        path: path of a video
        num_clips: expected number of split clips
        num_frames_per_clip: number of frames in a single clip; the pretrained
            motion model only supports 16 frames
    Returns:
        clips: list of per-clip frame arrays — (3, T, H, W) for 'resnext101',
            otherwise (T, 3, H, W)
        valid: False when the video could not be decoded (zero arrays returned)
    """
    valid = True
    print(path)
    video_data = []
    try:
        cap = cv2.VideoCapture(path)
        if cap.isOpened():
            rval, frame = cap.read()  # rval: success flag, frame: HxWx3 BGR array
            while rval:
                # OpenCV decodes frames as BGR; reorder the channels to RGB.
                b, g, r = cv2.split(frame)
                frame = cv2.merge([r, g, b])
                video_data.append(frame)
                rval, frame = cap.read()
        cap.release()
    except Exception:  # narrowed from a bare `except:`
        video_data = []

    total_frames = len(video_data)
    if total_frames == 0:
        # BUGFIX: the original re-raised ValueError here, which made the
        # zero-fill fallback and the `valid` flag below unreachable dead code
        # (defeating the invalid-video handling in generate_h5); it also
        # crashed later on successfully-opened but empty videos.
        print('file {} error'.format(path))
        valid = False
        if args.model == 'resnext101':
            return list(np.zeros(shape=(num_clips, 3, num_frames_per_clip, 112, 112))), valid
        else:
            return list(np.zeros(shape=(num_clips, num_frames_per_clip, 3, 224, 224))), valid

    img_size = (args.image_height, args.image_width)  # default: (224, 224)
    clips = list()

    # Evenly spaced clip centers excluding both endpoints, e.g.
    # np.linspace(0, 40, 6)[1:5] -> the four interior points.
    for center in np.linspace(0, total_frames, num_clips + 2, dtype=np.int32)[1:num_clips + 1]:
        clip_start = int(center) - int(num_frames_per_clip / 2)
        clip_end = int(center) + int(num_frames_per_clip / 2)
        if clip_start < 0:
            clip_start = 0
        if clip_end > total_frames:
            clip_end = total_frames - 1
        clip = video_data[clip_start:clip_end]
        if clip_start == 0:
            # Pad the front by repeating the first frame until the clip is full.
            shortage = num_frames_per_clip - (clip_end - clip_start)
            added_frames = []
            for _ in range(shortage):
                added_frames.append(np.expand_dims(video_data[clip_start], axis=0))
            if len(added_frames) > 0:
                added_frames = np.concatenate(added_frames, axis=0)
                clip = np.concatenate((added_frames, clip), axis=0)
        if clip_end == (total_frames - 1):
            # Pad the back by repeating the last frame until the clip is full.
            shortage = num_frames_per_clip - (clip_end - clip_start)
            added_frames = []
            for _ in range(shortage):
                added_frames.append(np.expand_dims(video_data[clip_end], axis=0))
            if len(added_frames) > 0:
                added_frames = np.concatenate(added_frames, axis=0)
                clip = np.concatenate((clip, added_frames), axis=0)
        new_clip = []
        for j in range(num_frames_per_clip):
            frame_data = clip[j]
            img = Image.fromarray(frame_data)
            img = img.resize(img_size, Image.BICUBIC)
            frame_data = np.array(img)
            frame_data = np.transpose(frame_data, axes=(2, 0, 1))  # HWC -> CHW
            new_clip.append(frame_data)
        new_clip = np.asarray(new_clip)  # (num_frames_per_clip, 3, H, W)
        if args.model in ['resnext101']:
            # The motion model expects channels first over time: (3, T, H, W).
            new_clip = np.squeeze(new_clip)
            new_clip = np.transpose(new_clip, axes=(1, 0, 2, 3))
        clips.append(new_clip)
    return clips, valid


def generate_h5(model, video_ids, num_clips, outfile):  # original note: earlier default was 8 clips
    """
    Split every video into `num_clips` clips, extract features for each clip
    with `model`, and write them to an HDF5 file.

    Args:
        model: loaded pretrained model used for feature extraction
        video_ids: list of (video_path, video_id) tuples, e.g.
            [('/root/autodl-tmp/MSVD-QA/video/00jrXRMlZOY_0_10.avi', 47)]
        num_clips: expected number of split clips per video
        outfile: path of the h5 output file to be written
    Returns:
        None; side effect is an h5 file holding the clip features
        ('resnet_features' or 'resnext_features') plus an 'ids' dataset
        aligned row-for-row with the features.
    NOTE(review): reads module-level `args` (dataset, feature_type) — must be
    set before calling.
    """
    if not os.path.exists('data/{}'.format(args.dataset)):
        os.makedirs('data/{}'.format(args.dataset))

    dataset_size = len(video_ids)  # number of videos to process

    with h5py.File(outfile, 'w') as fd:
        feat_dset = None        # created lazily once the first feature shape is known
        video_ids_dset = None
        i0 = 0                  # write cursor into the h5 datasets
        _t = {'misc': utils.Timer()}
        for i, (video_path, video_id) in tqdm(enumerate(video_ids)):
            # e.g. i, '/root/autodl-tmp/MSVD-QA/video/YouTubeClips/00jrXRMlZOY_0_10.avi', 47
            _t['misc'].tic()

            clips, valid = extract_clips_with_consecutive_frames(video_path, num_clips=num_clips,
                                                                 num_frames_per_clip=16)
            # clips: list with one per-clip frame array; `valid` is False when
            # the video could not be decoded (zeros are written in that case).
            if args.feature_type == 'appearance':
                clip_feat = []
                if valid:
                    for clip_id, clip in enumerate(clips):
                        # clip: ndarray, e.g. (16, 3, 224, 224) per original notes
                        feats = run_batch(clip, model)  # per-frame features for the clip
                        feats = feats.squeeze()  # drop singleton dims -> (16, 2048) per original notes
                        clip_feat.append(feats)
                else:
                    clip_feat = np.zeros(shape=(num_clips, 16, 2048))
                clip_feat = np.asarray(clip_feat)  # (num_clips, 16, 2048)
                if feat_dset is None:
                    # Dataset shapes are fixed by the first processed video.
                    C, F, D = clip_feat.shape
                    feat_dset = fd.create_dataset('resnet_features', (dataset_size, C, F, D),
                                                  dtype=np.float32)
                    video_ids_dset = fd.create_dataset('ids', shape=(dataset_size,), dtype=int)
            elif args.feature_type == 'motion':
                clip_torch = torch.FloatTensor(np.asarray(clips)).cuda()
                if valid:
                    clip_feat = model(clip_torch)  # one feature vector per clip
                    clip_feat = clip_feat.squeeze()
                    clip_feat = clip_feat.detach().cpu().numpy()
                else:
                    clip_feat = np.zeros(shape=(num_clips, 2048))
                if feat_dset is None:
                    C, D = clip_feat.shape
                    feat_dset = fd.create_dataset('resnext_features', (dataset_size, C, D),
                                                  dtype=np.float32)
                    video_ids_dset = fd.create_dataset('ids', shape=(dataset_size,), dtype=int)

            i1 = i0 + 1
            feat_dset[i0:i1] = clip_feat       # write this video's features
            video_ids_dset[i0:i1] = video_id   # keep ids aligned with features
            i0 = i1  # advance the write cursor
            _t['misc'].toc()
            if i % 1000 == 0:
                print('{:d}/{:d} {:.3f}s (projected finish: {:.2f} hours)' \
                      .format(i1, dataset_size, _t['misc'].average_time,
                              _t['misc'].average_time * (dataset_size - i1) / 3600))


# Parse command-line arguments, select the model and dataset, then load the
# model and extract features into an h5 file.
if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu_id', type=int, default=2, help='specify which gpu will be used')
    # dataset info
    parser.add_argument('--dataset', default='svqa', choices=['msvd-qa', 'msrvtt-qa', 'svqa'], type=str)
    parser.add_argument('--question_type', default='none', choices=['none'], type=str)
    # output
    parser.add_argument('--out', dest='outfile',
                        help='output filepath', default="data/{}/{}_{}_feat.h5", type=str)
    # image sizes
    parser.add_argument('--num_clips', default=24, type=int)
    parser.add_argument('--image_height', default=224, type=int)
    parser.add_argument('--image_width', default=224, type=int)

    # network params
    parser.add_argument('--model', default='resnet101', choices=['resnet101', 'resnext101'], type=str)
    parser.add_argument('--seed', default='666', type=int, help='random seed')
    args = parser.parse_args()

    # resnet101 produces per-frame appearance features; resnext101 produces
    # clip-level motion features.
    if args.model == 'resnet101':
        args.feature_type = 'appearance'
    elif args.model == 'resnext101':
        args.feature_type = 'motion'
    else:
        raise Exception('Feature type not supported!')

    # set gpu (resnext runs under DataParallel across all GPUs, so no pin)
    if args.model != 'resnext101':
        torch.cuda.set_device(args.gpu_id)

    # Seed all RNGs in use. BUGFIX: the original seeded torch and numpy but
    # never `random`, so random.shuffle(video_paths) was irreproducible
    # despite the --seed argument.
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    # Dataset-specific annotation/video locations and path loader; everything
    # after this branch is identical for all datasets (previously triplicated).
    if args.dataset == 'msrvtt-qa':
        args.annotation_file = '/home/WangJY/Jianyu_wang/MSRVTT-QA/{}_qa.json'
        args.video_dir = '/home/WangJY/Jianyu_wang/MSRVTT-QA/video/'
        video_paths = msrvtt_qa.load_video_paths(args)
    elif args.dataset == 'msvd-qa':
        args.annotation_file = './data/MSVD-QA/{}_qa.json'
        args.video_dir = './data/MSVD-QA/video'
        args.video_name_mapping = './data/MSVD-QA/youtube_mapping.txt'
        video_paths = msvd_qa.load_video_paths(args)
    else:  # svqa (train/val/test splitting is handled by the loader)
        args.annotation_file = '/home/WangJY/Jianyu_wang/SVQA/questions.json'
        args.video_dir = '/home/WangJY/Jianyu_wang/SVQA/useVideo/'
        video_paths = svqa.load_video_paths(args)

    # video_paths: list of (video_path, video_id) tuples, e.g.
    # [('/root/autodl-tmp/MSVD-QA/video/YouTubeClips/00jrXRMlZOY_0_10.avi', 47)]
    random.shuffle(video_paths)

    # load model (argparse `choices` already guarantees one of these two)
    if args.model == 'resnet101':
        model = build_resnet()
    else:
        model = build_resnext()

    generate_h5(model, video_paths, args.num_clips,
                args.outfile.format(args.dataset, args.dataset, args.feature_type))
