import numpy as np
import scipy.io as sio
import h5py
from sklearn.metrics import f1_score
from knapsack_iter import knapSack
import warnings
import json
import pickle
import re

# np.seterr(divide='ignore', invalid='ignore')
# warnings.simplefilter("ignore", category=RuntimeWarning)


def recover(seg_scos, seg_b, shot_b, frm_num):
    """Map sampled-frame (segment) scores back to shot-level scores.

    Each score is assigned to the span *following* its boundary (shifted one
    segment to the right), so the first span ``[seg_b[0], seg_b[1])`` is
    deliberately left at zero and the last score also fills everything after
    the final boundary.  (Per the original author's note, mapping the last
    segment score onto the remaining frames is the more reasonable choice,
    since seg_b alone cannot identify the sampled-frame ids.)

    :param seg_scos: per-segment scores, shape (seg_num,)
    :param seg_b: segment boundaries, shape (seg_num + 1,)
    :param shot_b: shot boundaries, shape (shot_num + 1,)
    :param frm_num: total number of frames in the video
    :returns: tuple ``(ret, recover)`` where
        ret: shot-level scores, shape (shot_num,)
        recover: frame-level scores, shape (frm_num,)
    """
    seg_num = seg_b.shape[0] - 1
    shot_num = shot_b.shape[0] - 1
    assert seg_num == seg_scos.shape[0]
    ret = np.zeros(shot_num, dtype=np.float32)
    recover = np.zeros(frm_num, dtype=np.float32)
    # NOTE: slices are half-open [begin, end) to match how the boundaries
    # are stored; score i covers the span between boundaries i+1 and i+2.
    # (The original contained a dead `if True:/else:` with an unshifted
    # variant; the unreachable branch has been removed.)
    for i in range(seg_num - 1):
        recover[seg_b[i + 1]:seg_b[i + 2]] = seg_scos[i]
    # The last score covers everything from the final boundary to the end.
    recover[seg_b[seg_num]:] = seg_scos[-1]

    # A shot's score is the mean of the frame scores it spans.
    for i in range(shot_num):
        ret[i] = np.mean(recover[shot_b[i]:shot_b[i + 1]])

    return ret, recover


def evaluate_tvsum(result_f):
    """Evaluate predicted summaries in *result_f* against ground truth.

    Loads predicted scores from a JSON result file, recovers shot-level and
    frame-level predictions, selects summary shots via knapsack under a 15%
    length budget, and prints the per-video and average F1 score against the
    knapsack-selected ground-truth summary.

    :param result_f: path to a JSON file mapping video names ('vdoNN') to
        ``[sample_frame_scores, labels]`` pairs; the path must contain
        'summe' or 'tvsum' so the dataset can be identified.
    :raises ValueError: if the dataset type cannot be inferred from the path.
    """
    with open(result_f, encoding='utf-8') as fh:
        final_result = json.load(fh)
    if re.search('summe', result_f) is not None:
        data_type = 'summe'
    elif re.search('tvsum', result_f) is not None:
        data_type = 'tvsum'
    else:
        # Previously this only printed a warning and then crashed later with
        # an UnboundLocalError; fail fast with a clear error instead.
        raise ValueError(f'cannot infer dataset type from path: {result_f}')

    # Load all ground-truth data: frame counts, shot boundaries, gt scores.
    if data_type == 'tvsum':
        gt_data = sio.loadmat('../../data/cnt_frm_TVSum.mat')
        gt_scores = gt_data['tvsum50']['gt_score'][0]
        nFrames = gt_data['cFrm'][0]
        cnt_sample = sio.loadmat('../../data/cnt_frm_sample_TVSum.mat')['segVid'].reshape(-1)
        shot_data = sio.loadmat('../../data/shot_TVSum.mat')['shot_boundaries'].reshape(-1)
    else:  # 'summe'
        # NOTE: SumMe_data.mat was re-saved through MATLAB beforehand.
        summe = sio.loadmat('../../data/summe25.mat')['summe25']
        gt_scores = summe['gtSet'][0][0][0]
        nFrames = summe['nFrames'][0][0][0]
        cnt_sample = sio.loadmat('../../data/cnt_frm_sample_SumMe.mat')['segVid'].reshape(-1)
        shot_data = sio.loadmat('../../data/shot_SumMe.mat')['shot_boundaries'].reshape(-1)

    # Interface of the loop below (all arrays indexed by video index):
    #   nFrames, gt_scores, shot_data, cnt_sample: each of shape (vdo_num,)
    total = 0  # renamed from `sum` to avoid shadowing the builtin
    for k, v in final_result.items():
        sample_frm_score, _label = np.array(v)
        vdo = int(k[4:]) - 1  # keys look like 'vdoNN' with a 1-based index

        gt_score = gt_scores[vdo].reshape((-1, ))
        shot_b = shot_data[vdo][0]
        seg_b = cnt_sample[vdo][0]
        nFrms = nFrames[vdo]
        shot_lbl, _ = recover(sample_frm_score, seg_b, shot_b, nFrms)

        # pred_lbl: frame-level predicted scores (constant within a shot).
        pred_lbl = np.zeros(gt_score.shape[0])
        for i in range(shot_b.shape[0] - 1):  # shot_num iterations
            pred_lbl[shot_b[i]:shot_b[i + 1]] = shot_lbl[i]

        # Reshape shot boundaries into (shot_num, 2) rows of [begin, end);
        # the stored boundaries already follow half-open slice semantics.
        shot_be = np.zeros([shot_b.size - 1, 2], dtype=np.int32)
        for i in range(shot_b.size - 1):
            shot_be[i, 0] = shot_b[i]
            shot_be[i, 1] = shot_b[i + 1]

        budget = 0.15  # summary length as a fraction of the full video

        # Binary frame-selection vectors for prediction and ground truth.
        # (A dead `if False:` per-user-annotation branch was removed: it
        # referenced `gt_data`, which is undefined for the summe dataset.)
        y_pred = solve_knapsack(pred_lbl, shot_be, budget, nFrms, verbose=True)
        y_true = solve_knapsack(gt_score, shot_be, budget, nFrms)
        f1score = f1_score(y_true, y_pred)
        print(f'ID.{vdo+1} idx to mat: {f1score}')
        total += f1score

    print('avg:', total/len(final_result.keys()))


def solve_knapsack(frm_scores, segments, portion, nFrames, verbose=False):
    """Select summary shots under a length budget via 0/1 knapsack.

    Shot values are mean frame scores, shot weights are shot lengths, and
    the capacity is ``portion * nFrames`` frames.

    :param frm_scores: frame-level scores, numpy array of shape (nFrames,)
    :param segments: shot boundaries, (n_shots, 2) rows of [begin, end)
    :param portion: fraction of the video the summary may occupy
    :param nFrames: total number of frames in the video
    :param verbose: if True, dump per-shot [start, end, score, chosen]
        records to 'info.pkl'
    :returns: binary frame-selection vector of shape (nFrames,)
    """
    length = int(portion * nFrames)
    n_shots = np.size(segments, 0)
    shot_scores = np.zeros([n_shots, 1])

    info = []
    for i in range(n_shots):
        start = segments[i, 0]
        # Clamp the end in case a stored boundary overruns the video length.
        end = min(nFrames, segments[i, 1])
        shot_scores[i] = np.mean(frm_scores[start:end])
        info.append([start, end, shot_scores[i][0]])

    shot_weight = (segments[:, 1] - segments[:, 0]).T

    # Binary per-shot selection flags from the knapsack solver.
    out_shot = knapSack(length, shot_weight, shot_scores, shot_weight.size)
    if verbose is True:
        for i in range(n_shots):
            info[i].append(int(out_shot[i]))
        # Context manager guarantees the file is closed even on error.
        with open('info.pkl', 'wb') as f:
            pickle.dump(info, f)

    # Expand shot selection flags to frame selection flags.
    out = np.zeros(nFrames)
    for i in range(out_shot.size):
        if out_shot[i] == 1:
            out[segments[i, 0]:min(nFrames, segments[i, 1])] = 1

    # Fallback 1: if nothing was selected, threshold the frame scores at the
    # (1 - portion) percentile. (Should not happen; kept just in case.)
    if np.sum(out) == 0:
        print('scores are all zero!!!')
        threshold = np.percentile(frm_scores, int((1 - portion) * 100))
        out = np.asarray([1 if q >= threshold else 0 for q in frm_scores])

    # Fallback 2: if still empty, greedily take the highest-scoring frames
    # until the budget fraction is reached.
    if np.sum(out) == 0:
        print('scores are still all zero!!!')
        ind = np.argsort(-frm_scores)  # frame indices by descending score
        i = 0  # BUGFIX: previously started at 1, skipping the best frame
        while sum(out) / np.size(out) < portion:
            out[ind[i]] = 1
            i += 1

    return out


if __name__ == "__main__":
    # Evaluate epoch-49 results: first run 4 alone, then runs 1 through 5.
    path_tpl = '../../data/results/summe_canonical_original_hs1024__lbl1_{}/score/epoch_49.json'
    evaluate_tvsum(path_tpl.format(4))
    # for i in range(50):
    #     result_f = f'../../data/results/summe_canonical_original_hs1024__lbl1_1/score/epoch_{i}.json'
    #     evaluate_tvsum(result_f)

    for run_id in range(1, 6):
        evaluate_tvsum(path_tpl.format(run_id))

