import torch
import torch.utils.data as data
from PIL import Image
import os
import math
import functools
import json
import copy

import random
import numpy as np
from .dataset import get_seg_range
from .transforms import color_aug
from . import getLogger
import logging
log: logging.Logger

def load_value_file(file_path):
    """Read the whole file at *file_path* and parse it as a single float.

    Trailing newline/carriage-return characters are stripped before
    conversion, so files written as e.g. ``"123\\n"`` parse cleanly.
    """
    with open(file_path, 'r') as handle:
        raw_text = handle.read()
    return float(raw_text.rstrip('\n\r'))


def pil_loader(path):
    """Load the image at *path* with PIL and return it converted to RGB.

    The file is opened explicitly and managed with ``with`` to avoid the
    ResourceWarning described in python-pillow/Pillow#835.
    """
    with open(path, 'rb') as file_obj, Image.open(file_obj) as image:
        return image.convert('RGB')


def accimage_loader(path):
    """Load *path* via the accimage backend, falling back to PIL on failure.

    accimage raises IOError on images it cannot decode; PIL is more
    tolerant, so it serves as the fallback decoder.
    """
    try:
        import accimage
        return accimage.Image(path)
    except IOError:
        # Potentially a decoding problem — retry with the PIL loader.
        return pil_loader(path)


def get_default_image_loader():
    """Return the image-loading function matching torchvision's backend.

    Uses :func:`accimage_loader` when torchvision is configured for the
    'accimage' backend, otherwise :func:`pil_loader`.
    """
    from torchvision import get_image_backend
    backend = get_image_backend()
    return accimage_loader if backend == 'accimage' else pil_loader


def video_loader(video_dir_path, frame_indices, image_loader):
    """Load the frames named ``image_{i:05d}.jpg`` for each i in *frame_indices*.

    Loading stops at the first missing frame file; the frames collected up
    to that point (possibly an empty list) are returned.
    """
    frames = []
    for frame_number in frame_indices:
        frame_path = os.path.join(
            video_dir_path, 'image_{:05d}.jpg'.format(frame_number))
        if not os.path.exists(frame_path):
            # Missing frame: give back whatever was loaded so far.
            break
        frames.append(image_loader(frame_path))
    return frames


def get_default_video_loader():
    """Return :func:`video_loader` pre-bound to the default image backend."""
    return functools.partial(
        video_loader, image_loader=get_default_image_loader())


def load_annotation_data(data_file_path):
    """Read the annotation file at *data_file_path* and decode it as JSON."""
    with open(data_file_path, 'r') as handle:
        raw = handle.read()
    return json.loads(raw)


def get_class_labels(data):
    """Map each class name in ``data['labels']`` to a dense integer index.

    Indices follow the order in which labels appear in the annotation
    data, starting at 0 — identical to the original manual-counter loop,
    expressed as an idiomatic dict comprehension over ``enumerate``.

    Args:
        data: Decoded annotation JSON with a ``'labels'`` list.

    Returns:
        Dict mapping class-name string -> integer index.
    """
    return {label: index for index, label in enumerate(data['labels'])}


def get_video_names_and_annotations(data, subset):
    """Collect video names and annotations belonging to *subset*.

    Video names are formatted as ``'<label>/<video_id>'`` so they can be
    joined directly onto the frame root directory.

    Returns:
        Tuple ``(video_names, annotations)`` of two parallel lists.
    """
    video_names = []
    annotations = []

    for video_id, info in data['database'].items():
        if info['subset'] != subset:
            continue
        annotation = info['annotations']
        video_names.append('{}/{}'.format(annotation['label'], video_id))
        annotations.append(annotation)

    return video_names, annotations


def make_dataset(root_path, annotation_path, subset, n_samples_for_each_video,
                 sample_duration):
    """Build the per-video sample list for *subset* from a JSON annotation file.

    Args:
        root_path: Directory holding one sub-directory of JPEG frames per
            video, laid out as ``<label>/<video_id>``.
        annotation_path: Path to the ActivityNet-style JSON annotation file.
        subset: Subset to select, e.g. ``'training'`` or ``'validation'``.
        n_samples_for_each_video: 1 = one sample spanning every frame;
            >1 = that many fixed-length clips spread across the video;
            otherwise clips are tiled with stride ``sample_duration``.
        sample_duration: Clip length in frames (used when more than one
            sample per video is generated).

    Returns:
        Tuple ``(dataset, idx_to_class)``: ``dataset`` is a list of dicts
        with keys ``video``, ``segment``, ``n_frames``, ``video_id``,
        ``label``, ``frame_indices``; ``idx_to_class`` maps integer class
        index back to class name.
    """
    data = load_annotation_data(annotation_path)
    video_names, annotations = get_video_names_and_annotations(data, subset)
    class_to_idx = get_class_labels(data)
    # Invert the label map so callers can recover class names from indices.
    idx_to_class = {}
    for name, label in class_to_idx.items():
        idx_to_class[label] = name

    dataset = []
    for i in range(len(video_names)):
        if i % 1000 == 0:
            print('dataset loading [{}/{}]'.format(i, len(video_names)))

        video_path = os.path.join(root_path, video_names[i])
        if not os.path.exists(video_path):
            # Skip videos whose frame directory was never extracted.
            continue

        # Each video directory carries an 'n_frames' file with its frame count.
        n_frames_file_path = os.path.join(video_path, 'n_frames')
        n_frames = int(load_value_file(n_frames_file_path))
        if n_frames <= 0:
            continue

        begin_t = 1
        end_t = n_frames
        sample = {
            'video': video_path,
            'segment': [begin_t, end_t],
            'n_frames': n_frames,
            'video_id': video_names[i].split('/')[1]
        }
        if len(annotations) != 0:
            sample['label'] = class_to_idx[annotations[i]['label']]
        else:
            # No annotations for this subset (e.g. an unlabeled test split).
            sample['label'] = -1

        if n_samples_for_each_video == 1:
            # Single sample covering the whole video (frames are 1-based).
            sample['frame_indices'] = list(range(1, n_frames + 1))
            dataset.append(sample)
        else:
            if n_samples_for_each_video > 1:
                # Stride chosen so the requested number of clips spans the video.
                step = max(1,
                           math.ceil((n_frames - 1 - sample_duration) /
                                     (n_samples_for_each_video - 1)))
            else:
                # Non-positive request: tile clips back-to-back instead.
                step = sample_duration
            for j in range(1, n_frames, step):
                # Deep copy so each clip gets its own independent dict.
                sample_j = copy.deepcopy(sample)
                sample_j['frame_indices'] = list(
                    range(j, min(n_frames + 1, j + sample_duration)))
                dataset.append(sample_j)

    return dataset, idx_to_class


class OriginalDataSet(data.Dataset):
    """Segment-sampling dataset over pre-extracted JPEG frames (UCF-101).

    Each item draws one frame from each of ``num_segments`` equal temporal
    segments of a video (randomly during training, at the segment midpoint
    otherwise), applies ``transform`` and color augmentation, and returns a
    float tensor normalized with ImageNet channel statistics plus the label.
    """

    def __init__(self,
                 cfg,
                 video_list,
                 transform):
        # NOTE(review): rebinds the module-level `log` as a constructor side
        # effect; the rest of this class relies on it having been set here.
        global log
        log = getLogger(cfg)
        self._transform = transform
        self._data_root = cfg.data_root
        self._data_name = cfg.data_name
        self._num_segments = cfg.num_segments
        self._is_train = cfg.mode == 'train'
        # TODO: hard-coded dataset paths and (n_samples=1, sample_duration=3);
        # note the `video_list` argument is not used to build the data here.
        self.data, self.class_names = make_dataset(
            '/home/zgp/i3d/data/ucf101_videos/jpg',
            '/home/zgp/i3d/data/UCF101TrainTestSplits-RecognitionTask/ucfTrainTestlist/ucf101_01.json',
            'training' if self._is_train else 'validation', 1, 3)

        # ImageNet RGB mean/std shaped (1, 3, 1, 1) so they broadcast over
        # (segments, channels, H, W) inputs in __getitem__.
        self._input_mean = torch.from_numpy(
            np.array([0.485, 0.456, 0.406]).reshape((1, 3, 1, 1))).float()
        self._input_std = torch.from_numpy(
            np.array([0.229, 0.224, 0.225]).reshape((1, 3, 1, 1))).float()

        self._video_list = []  # [(video path, label, frame count), ...]
        for item in self.data:
            self._video_list.append((item['video'], item['label'], item['n_frames']))

        log.info('%d videos loaded.' % len(self._video_list))

    def _get_train_frame_index(self, num_frames, seg):
        # Compute the range of the segment.
        # seg_begin / seg_end are the first frame index and one-past-last
        # frame index of segment `seg`.
        seg_begin, seg_end = get_seg_range(num_frames, self._num_segments, seg,
                                           representation='iframe')

        # Sample one frame uniformly at random from [seg_begin, seg_end - 1].
        # Sample one frame from the segment.
        v_frame_idx = random.randint(seg_begin, seg_end - 1)
        return v_frame_idx

    def _get_test_frame_index(self, num_frames, seg):
        # Deterministic pick: the frame nearest the midpoint of segment `seg`.
        seg_size = float(num_frames - 1) / self._num_segments
        v_frame_idx = int(np.round(seg_size * (seg + 0.5)))

        return v_frame_idx

    def __getitem__(self, index):

        if self._is_train:
            # NOTE: training ignores `index` and picks a video uniformly at
            # random — i.e. sampling with replacement across the epoch.
            video_path, label, num_frames = random.choice(self._video_list)
        else:
            video_path, label, num_frames = self._video_list[index]

        frames = []
        for seg in range(self._num_segments):  # e.g. 0, 1, 2

            if self._is_train:
                vdo_frm_idx = self._get_train_frame_index(num_frames, seg)
            else:
                vdo_frm_idx = self._get_test_frame_index(num_frames, seg)

            # Frame files on disk are 1-based: image_00001.jpg, ...
            img_path = f'{video_path}/image_{vdo_frm_idx+1:0>5}.jpg'
            # <class 'numpy.ndarray'>
            img = np.array(pil_loader(img_path))

            if img is None:
                # NOTE(review): np.array(...) never returns None and
                # pil_loader raises on a missing file — this guard looks dead.
                print('Error: loading video %s failed.' % video_path)
                img = np.zeros((256, 256, 3))

            img = color_aug(img)
            # BGR to RGB. (PyTorch uses RGB.) Reverses the channel axis.
            # NOTE(review): pil_loader already yields RGB, so this flip makes
            # BGR while the mean/std above are RGB-ordered — confirm the
            # channel order color_aug expects before trusting this comment.
            img = img[..., ::-1]
            frames.append(img)

        frames = self._transform(frames)

        frames = np.array(frames)
        # frames.shape (k=3, 224, 224, c=3) => (k=3, c=3, 224, 224)
        frames = np.transpose(frames, (0, 3, 1, 2))
        input = torch.from_numpy(frames).float() / 255.0
        # mean and std have shape (1, 3, 1, 1); inputs are normalized via
        # broadcasting over the segment and spatial dimensions.
        input = (input - self._input_mean) / self._input_std

        return input, label

    def __len__(self):
        # In training mode __getitem__ ignores the index, so this length
        # only bounds the number of random draws per epoch.
        return len(self._video_list)
