import os
import cv2
import pickle
from utils2 import *

# --- Input/output paths for building supplementary optical-flow training data ---
# Source video whose frames are sampled around each alerting anchor.
round1_video_path = "E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round1/round1.avi"
# Round-2 video; only its frame count is used (to offset output frame numbering).
round2_video_path = "E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round2/round2.avi"
# Pickled mapping from round2 anchor indices to round1 frame indices.
index_map = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/IndexMap.pickle'
# Anchor training index file consumed by getAnchorPosNegIdx4.
anchor_training_index_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round2/anchorImgs_method2_exp2/round2_method2_exp2_training_index.txt'
anchor_referenceLabel_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round2/anchorReferenceLabel_method2_exp2.txt' # no reference label by default
# Destination .npy file for the {frame_name: flow} dictionary built below.
supplement_opticalFlow_save_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round2/anchorImgs_method2_exp2/supplement_training_data_opticalFlow.npy'

# Load the index map and keep element [1]: the round2 -> round1 frame mapping.
# Context manager fixes the original resource leak (file handle was never closed).
with open(index_map, 'rb') as index_map_file:
    index_map = pickle.load(index_map_file)
index_map = index_map[1]

# Anchor index is the first element of each positive-list entry.
anchor_pos_list, anchor_neg_list = getAnchorPosNegIdx4(anchor_training_index_path)
anchor_idx = [pos[0] for pos in anchor_pos_list]

# One reference label per line; only the first character is parsed.
# NOTE(review): int(line[0]) assumes single-digit labels — confirm label range.
with open(anchor_referenceLabel_path, 'r') as label_file:
    lines = label_file.readlines()
rf_label_anchor = np.array([int(line[0]) for line in lines])

anchor_idx = np.array(anchor_idx)
# Keep only anchors whose reference label equals 3 ("alerting" class).
alerting_idx = anchor_idx[rf_label_anchor == 3]

round1_video = cv2.VideoCapture(round1_video_path)
round2_video = cv2.VideoCapture(round2_video_path)
# Fail fast: without these checks a failed open only surfaces later as a
# cryptic cv2.resize(None) crash deep inside the extraction loop.
if not round1_video.isOpened():
    raise IOError('Cannot open video: ' + round1_video_path)
if not round2_video.isOpened():
    raise IOError('Cannot open video: ' + round2_video_path)

# Supplementary frames are numbered after round2's own frames, so the
# running output index starts at round2's total frame count.
round2_frame_num = int(round2_video.get(cv2.CAP_PROP_FRAME_COUNT))
cur_round1_idx = round2_frame_num

sample_interval = 2  # frame stride between sampled positions
sampleNum = 16       # total frames sampled around each alerting anchor

flow_dict = dict()

for one_alerting_idx in alerting_idx:
    # Map the round2 anchor index to its corresponding round1 frame index.
    round1_idx = index_map[one_alerting_idx][0]
    # Sample sampleNum positions centred on round1_idx:
    # half at/before the anchor, half after, stepping by sample_interval.
    half = sampleNum // 2
    data_idx = ([round1_idx - i * sample_interval for i in range(half)]
                + [round1_idx + (i + 1) * sample_interval for i in range(half)])
    data_idx.sort()
    for frame_idx in data_idx:
        round1_video.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)

        # Current frame: check the read succeeded (original ignored `ret`,
        # so a bad seek crashed inside cv2.resize with a misleading error),
        # then resize to network input size and convert to grayscale.
        ret, img = round1_video.read()
        if not ret:
            raise IOError('Failed to read frame %d from round1 video' % frame_idx)
        img = cv2.resize(img, (224, 224))
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Next frame (read pointer already advanced): same preprocessing.
        # `next_img` avoids shadowing the builtin `next`.
        ret, next_img = round1_video.read()
        if not ret:
            raise IOError('Failed to read frame %d from round1 video' % (frame_idx + 1))
        next_img = cv2.resize(next_img, (224, 224))
        nextgray = cv2.cvtColor(next_img, cv2.COLOR_BGR2GRAY)

        # Dense Farneback optical flow between consecutive frames.
        flow = cv2.calcOpticalFlowFarneback(gray, nextgray, None, 0.5, 3, 15, 3, 5, 1.2, 0)

        # Key by the running output index, zero-padded to 10 digits.
        img_name = str(cur_round1_idx).rjust(10, '0') + '.png'
        flow_dict[img_name] = flow

        cur_round1_idx += 1

# Saving a dict via np.save pickles it; loading requires allow_pickle=True.
np.save(supplement_opticalFlow_save_path, flow_dict)
    

