from config import *
from log import *

import copy
import json
import os

import numpy as np
import pandas as pd


class LoadFeat(object):
    """Pipeline stage: load a video's feature matrix from its CSV file.

    Reads ``<feat_path>/<video_name>.csv``, transposes it to
    (channels, time) and stores it under ``results['video_feat']``.
    """

    def __init__(self, feat_path):
        # Directory holding one "<video_name>.csv" feature file per video.
        self.feat_path = feat_path

    def __call__(self, results):
        csv_file = os.path.join(self.feat_path,
                                results['video_name'] + ".csv")
        # CSV rows are time steps, columns are feature channels; the model
        # expects (channels, time) in float32, hence transpose + cast.
        feature_matrix = pd.read_csv(csv_file).values[:, :]
        results['video_feat'] = feature_matrix.T.astype("float32")
        return results


class GetMatchMap(object):
    """Pipeline stage: build the BMN proposal map and boundary anchors.

    Produces one [xmin, xmax] window for every (duration, start) pair on a
    timeline normalised to [0, 1], plus the per-position anchor intervals.
    """

    def __init__(self, tscale):
        self.tscale = tscale
        self.tgap = 1. / self.tscale  # width of one temporal unit

    def __call__(self, results):
        # windows[start][duration-1] == [xmin, xmax]; xmax may exceed 1.0
        # for proposals that run past the end of the video.
        windows = []
        for start_idx in range(self.tscale):
            left = self.tgap * start_idx
            windows.append([[left, left + self.tgap * dur]
                            for dur in range(1, self.tscale + 1)])

        # Reorder to duration-major, then flatten to a (D*T, 2) array so
        # row d*tscale+t is the proposal starting at t with duration d+1.
        match_map = np.array(windows).transpose([1, 0, 2]).reshape([-1, 2])

        results['match_map'] = match_map
        results['anchor_xmin'] = [self.tgap * i for i in range(self.tscale)]
        results['anchor_xmax'] = [self.tgap * i
                                  for i in range(1, self.tscale + 1)]
        return results


class GetVideoLabel(object):
    """Pipeline stage: build BMN training targets from annotations.

    Produces three label tensors for one video: a (dscale, tscale) IoU
    confidence map, and two length-tscale sequences scoring how well each
    temporal position matches a ground-truth start / end boundary.
    """

    def __init__(self, tscale, dscale, datatype="float32"):
        self.tscale = tscale
        self.dscale = dscale
        self.tgap = 1. / self.tscale
        self.datatype = datatype

    def iou_with_anchors(self, anchors_min, anchors_max, box_min, box_max):
        """Jaccard (IoU) overlap between one box and an array of anchors."""
        len_anchors = anchors_max - anchors_min
        inter_len = np.maximum(
            np.minimum(anchors_max, box_max) - np.maximum(anchors_min, box_min),
            0.)
        union_len = len_anchors - inter_len + box_max - box_min
        return np.divide(inter_len, union_len)

    def ioa_with_anchors(self, anchors_min, anchors_max, box_min, box_max):
        """Intersection-over-anchor score between a box and the anchors."""
        inter_len = np.maximum(
            np.minimum(anchors_max, box_max) - np.maximum(anchors_min, box_min),
            0.)
        return np.divide(inter_len, anchors_max - anchors_min)

    def __call__(self, results):
        info = results['video_info']
        match_map = results['match_map']
        anchor_xmin = results['anchor_xmin']
        anchor_xmax = results['anchor_xmax']
        duration = info['duration']

        # Normalise each annotated segment to [0, 1] and score every
        # proposal window in match_map against it.
        gt_bbox = []
        iou_maps = []
        for anno in info['annotations']:
            seg_start = max(min(1, anno["segment"][0] / duration), 0)
            seg_end = max(min(1, anno["segment"][1] / duration), 0)
            gt_bbox.append([seg_start, seg_end])
            overlaps = self.iou_with_anchors(match_map[:, 0], match_map[:, 1],
                                             seg_start, seg_end)
            iou_maps.append(np.reshape(overlaps,
                                       [self.dscale, self.tscale]))
        # Confidence target: best overlap at each (duration, start) cell.
        gt_iou_map = np.max(np.array(iou_maps), axis=0)

        gt_bbox = np.array(gt_bbox)
        starts, ends = gt_bbox[:, 0], gt_bbox[:, 1]
        # A small window (3 temporal units wide) around every boundary.
        gt_len_small = 3 * self.tgap
        start_windows = np.stack(
            (starts - gt_len_small / 2, starts + gt_len_small / 2), axis=1)
        end_windows = np.stack(
            (ends - gt_len_small / 2, ends + gt_len_small / 2), axis=1)

        # For each anchor, keep its best overlap with any boundary window.
        gt_start = np.array([
            np.max(self.ioa_with_anchors(xmin, xmax,
                                         start_windows[:, 0],
                                         start_windows[:, 1]))
            for xmin, xmax in zip(anchor_xmin, anchor_xmax)
        ])
        gt_end = np.array([
            np.max(self.ioa_with_anchors(xmin, xmax,
                                         end_windows[:, 0],
                                         end_windows[:, 1]))
            for xmin, xmax in zip(anchor_xmin, anchor_xmax)
        ])

        results['gt_iou_map'] = gt_iou_map.astype(self.datatype)
        results['gt_start'] = gt_start.astype(self.datatype)
        results['gt_end'] = gt_end.astype(self.datatype)
        return results


# Wrap the individual preprocessing steps into a single composed pipeline.
class Compose(object):
    """Compose the fixed preprocessing pipeline into a single callable.

    The pipeline is LoadFeat -> GetMatchMap -> GetVideoLabel, configured
    from the module-level ``feat_path`` / ``tscale`` / ``dscale`` settings
    (brought in via ``from config import *``).

    Args:
        train_mode (bool): accepted for API compatibility; the same
            pipeline is currently built regardless of its value.
    """
    def __init__(self, train_mode=False):
        self.pipelines = [
            LoadFeat(feat_path=feat_path),
            GetMatchMap(tscale=tscale),
            GetVideoLabel(tscale=tscale, dscale=dscale),
        ]

    def __call__(self, data):
        # Thread the sample dict through every stage in order.
        # (The original wrapped each call in ``try/except ... raise e``,
        # which only mangled the traceback without handling anything.)
        for pipeline in self.pipelines:
            data = pipeline(data)
        return data

from paddle.io import Dataset

class BMNDataset(Dataset):
    """Video dataset for BMN temporal action localization.

    Args:
        file_path (str): path to the annotation JSON, with a top-level
            ``"database"`` dict mapping video names to their metadata.
        pipeline (callable): preprocessing pipeline applied per sample
            (e.g. a ``Compose`` instance).
        subset (str): split to keep ("training"/"validation"/...); matched
            as a substring of each video's ``"subset"`` field.
        test_mode (bool): if True, ``__getitem__`` also returns the
            video index alongside the label tensors.
    """
    def __init__(
        self,
        file_path,
        pipeline,
        subset,
        test_mode = False
    ):
        super(BMNDataset, self).__init__()
        self.subset = subset
        self.file_path = file_path
        self.pipeline = pipeline
        self.test_mode = test_mode
        self.info = self.load_file()
        # The original conditional (``osp.realpath(data_prefix) if ...``)
        # was dead code: the local data_prefix was always None. Keep the
        # attribute for compatibility.
        self.data_prefix = None

    def load_file(self):
        """Load the annotation JSON and collect per-video info dicts."""
        # NOTE(review): relies on module-level ``feat_path`` and ``logger``
        # coming from the star imports at the top of the file.
        # Use a context manager so the file handle is closed deterministically
        # (the original ``json.load(open(...))`` leaked it).
        with open(self.file_path) as f:
            annos = json.load(f)['database']

        info = []
        for video_name in annos.keys():
            # Keep only videos of the requested subset that actually have
            # a feature file on disk.
            video_subset = annos[video_name]["subset"]
            if self.subset in video_subset:
                if os.path.exists(os.path.join(feat_path,
                                               video_name + '.csv')):
                    info.append(
                        dict(
                            video_name=video_name,
                            video_info=annos[video_name],
                        ))

        # Sort by name so sample indices are deterministic, then record
        # each video's index for use in test mode.
        info.sort(key=lambda elem: elem['video_name'])
        for idx, elem in enumerate(info):
            info[idx]['video_idx'] = idx
        logger.info("{} subset video numbers: {}".format(
            self.subset, len(info)))

        return info

    def prepare_train(self, idx):
        """TRAIN & VALID: return (feat, iou_map, start, end) for a sample."""
        # Deep-copy so pipeline stages cannot mutate the cached info dict.
        results = copy.deepcopy(self.info[idx])
        results = self.pipeline(results)
        return results['video_feat'], results['gt_iou_map'], \
               results['gt_start'], results['gt_end']

    def prepare_test(self, idx):
        """TEST: same as prepare_train, plus the video index."""
        results = copy.deepcopy(self.info[idx])
        results = self.pipeline(results)
        return results['video_feat'], results['gt_iou_map'], \
               results['gt_start'], results['gt_end'], results['video_idx']

    def __len__(self):
        """Return the number of videos in this split."""
        return len(self.info)

    def __getitem__(self, idx):
        """Dispatch to the test or train sample builder by mode."""
        if self.test_mode:
            return self.prepare_test(idx)
        else:
            return self.prepare_train(idx)