"""
Definition of PyTorch "Dataset" that iterates through compressed videos
and returns compressed representations (I-frames, motion vectors,
or residuals) for training or testing.
"""

import os
import os.path
import random

import numpy as np
import torch
import torch.utils.data as data

from coviar import get_num_frames
from coviar import load
from .transforms import color_aug
from . import getLogger
import logging
import typing
log: logging.Logger
# GOP (group of pictures) size of the re-encoded videos.
GOP_SIZE = 12


def clip_and_scale(img, size):
    """Rescale ``img`` so that magnitude ``size`` maps to 127.5, as int32.

    The cast truncates toward zero (C-style), matching numpy's astype.
    """
    scale = 127.5 / size
    return np.asarray(img * scale).astype(np.int32)


def get_seg_range(n, num_segments, seg, representation):
    """Return the half-open frame range ``[begin, end)`` of segment ``seg``.

    n: total number of frames in the video
       (e.g. for hmdb51, n falls in [21, 305]).
    num_segments: how many equal segments the video is split into.
    representation: 'iframe', 'mv' or 'residual'. For 'mv'/'residual' the
        0-th frame is an I-frame and carries no motion/residual data, so it
        is excluded and the returned range is shifted up by one.
    """
    usable = n - 1 if representation in ('residual', 'mv') else n

    segment_len = float(usable - 1) / num_segments
    begin = int(np.round(segment_len * seg))
    end = int(np.round(segment_len * (seg + 1)))
    # Guarantee every segment spans at least one frame.
    if end == begin:
        end = begin + 1

    # Shift past the excluded 0-th I-frame for mv/residual.
    offset = 1 if representation in ('residual', 'mv') else 0
    return begin + offset, end + offset


def get_gop_pos(frame_idx, representation):
    """Map an absolute frame index to ``(gop_index, gop_pos)``.

    For 'mv'/'residual', position 0 of a GOP is an I-frame (no motion or
    residual data), so such an index falls back to the last frame of the
    previous GOP. For any other representation the GOP's leading I-frame
    (position 0) is always used.
    """
    gop_index, gop_pos = divmod(frame_idx, GOP_SIZE)
    if representation in ('residual', 'mv'):
        if gop_pos == 0:
            # I-frame slot: use the final frame of the preceding GOP.
            gop_index -= 1
            gop_pos = GOP_SIZE - 1
    else:
        gop_pos = 0
    return gop_index, gop_pos


class CoviarDataSet(data.Dataset):
    """Dataset over compressed videos.

    Each item is ``(input, label)`` where ``input`` holds
    ``cfg.num_segments`` frames in the representation selected by
    ``cfg.representation`` ('iframe', 'mv' or 'residual'), normalized
    per representation and shaped (num_segments, C, H, W).
    """

    def __init__(self,
                 cfg,
                 video_list,
                 transform):
        """
        cfg: config object; reads data_root, data_name, num_segments,
            representation, mode, no_accumulation.
        video_list: path to a text file of "<video> <_> <label>" lines.
        transform: group transform applied to the sampled frames.
        """
        # NOTE: rebinds the module-level logger so the free functions in
        # this file share it.
        global log
        log = getLogger(cfg)
        self._transform = transform
        self._data_root = cfg.data_root
        self._data_name = cfg.data_name
        self._num_segments = cfg.num_segments
        self._representation = cfg.representation
        self._is_train = cfg.mode == 'train'
        self._accumulate = not cfg.no_accumulation

        # ImageNet mean/std, shaped (1, 3, 1, 1) for broadcasting.
        self._input_mean = torch.from_numpy(
            np.array([0.485, 0.456, 0.406]).reshape((1, 3, 1, 1))).float()
        self._input_std = torch.from_numpy(
            np.array([0.229, 0.224, 0.225]).reshape((1, 3, 1, 1))).float()

        self._load_list(video_list)

    def _load_list(self, video_list):
        """Parse ``video_list`` into [(video_path, label, num_frames), ...]."""
        self._video_list = []
        with open(video_list, 'r') as f:
            for line in f:
                video, _, label = line.strip().split()
                video_path = os.path.join(self._data_root, f'{video[:-4]}.mp4')
                self._video_list.append((
                    video_path,
                    int(label),
                    get_num_frames(video_path)))

        # Lazy %-style args: the message is only formatted if emitted.
        log.info('%d videos loaded.', len(self._video_list))

    def _get_train_frame_index(self, num_frames, seg):
        """Randomly sample one frame from segment ``seg`` (training mode)."""
        # Compute the frame range [seg_begin, seg_end) of the segment.
        seg_begin, seg_end = get_seg_range(num_frames, self._num_segments, seg,
                                           representation=self._representation)

        # Sample one frame uniformly from [seg_begin, seg_end - 1].
        v_frame_idx = random.randint(seg_begin, seg_end - 1)
        return get_gop_pos(v_frame_idx, self._representation)

    def _get_test_frame_index(self, num_frames, seg):
        """Pick the deterministic center frame of segment ``seg`` (eval mode)."""
        if self._representation in ['mv', 'residual']:
            num_frames -= 1  # exclude the 0-th frame (an I-frame)

        seg_size = float(num_frames - 1) / self._num_segments
        v_frame_idx = int(np.round(seg_size * (seg + 0.5)))

        if self._representation in ['mv', 'residual']:
            v_frame_idx += 1

        return get_gop_pos(v_frame_idx, self._representation)

    def __getitem__(self, index):
        """Return ``(input, label)`` for one video.

        In training mode a random video is drawn (sampling with
        replacement) and ``index`` is ignored; in eval mode ``index``
        selects the video.
        """
        if self._representation == 'mv':
            representation_idx = 1
        elif self._representation == 'residual':
            representation_idx = 2
        else:
            representation_idx = 0

        if self._is_train:
            # Training draws a random video (sampling with replacement).
            video_path, label, num_frames = random.choice(self._video_list)
        else:
            video_path, label, num_frames = self._video_list[index]

        frames = []
        for seg in range(self._num_segments):  # e.g. 0, 1, 2

            if self._is_train:
                gop_index, gop_pos = self._get_train_frame_index(
                    num_frames, seg)
            else:
                gop_index, gop_pos = self._get_test_frame_index(
                    num_frames, seg)

            # coviar.load returns a numpy.ndarray, or None on failure.
            img = load(video_path, gop_index, gop_pos,
                       representation_idx, self._accumulate)

            if img is None:
                # Use the module logger (was a bare print) for consistency
                # with the rest of this file; substitute a blank frame.
                log.error('Error: loading video %s failed.', video_path)
                img = np.zeros((256, 256, 2)) if self._representation == 'mv' else np.zeros(
                    (256, 256, 3))
            else:
                if self._representation == 'mv':
                    # Re-encoded frame size is (256, 340, 2). Scale motion
                    # vectors so magnitude 20 maps to 127.5, shift by 128
                    # (assumed motion mean — TODO confirm), clamp to [0, 255].
                    img = clip_and_scale(img, 20)
                    img += 128
                    img = np.clip(img, 0, 255).astype(np.uint8)
                elif self._representation == 'residual':
                    img += 128
                    img = np.clip(img, 0, 255).astype(np.uint8)

            if self._representation == 'iframe':
                img = color_aug(img)

                # BGR to RGB (PyTorch models expect RGB). The negative
                # stride from the reversed view is resolved by the
                # np.array() copy below.
                img = img[..., ::-1]

            frames.append(img)

        # train transform = model.get_augmentation()
        # test transform = torchvision.transforms.Compose([
        #           GroupScale(int(model.scale_size)),
        #           GroupCenterCrop(model.crop_size),
        #           ])
        frames = self._transform(frames)

        frames = np.array(frames)
        # (num_segments, H, W, C) -> (num_segments, C, H, W),
        # e.g. (3, 224, 224, 2) -> (3, 2, 224, 224) for 'mv'.
        frames = np.transpose(frames, (0, 3, 1, 2))
        # Renamed from `input` to avoid shadowing the builtin.
        inputs = torch.from_numpy(frames).float() / 255.0

        # mean/std are (1, 3, 1, 1); iframe and residual broadcast over them.
        if self._representation == 'iframe':
            inputs = (inputs - self._input_mean) / self._input_std
        elif self._representation == 'residual':
            inputs = (inputs - 0.5) / self._input_std
        elif self._representation == 'mv':
            inputs = inputs - 0.5

        return inputs, label

    def __len__(self):
        """Number of videos in the list."""
        return len(self._video_list)
