import numpy as np
import cv2
import os

# Which experiment round to process; interpolated into the video path below.
roundx = 2
video_path = "E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round%d/round%d.avi"%(roundx,roundx)
flow_path = 'C:\\Users\\A\\Desktop\\merged.npy'
# merged.npy holds a pickled dict; keys appear to be frame filenames like
# "123.jpg" (see the int(key[:-4]) parse below) mapping to precomputed
# optical-flow arrays. allow_pickle is required to load a dict this way.
flow_dict = np.load(flow_path, allow_pickle=True).item()


cap = cv2.VideoCapture(video_path)
# Fail fast with a clear message instead of crashing later on a None frame.
if not cap.isOpened():
    raise RuntimeError("Could not open video: %s" % video_path)
cap.set(cv2.CAP_PROP_POS_FRAMES, 0)

# NOTE(review): the original also built an XVID fourcc here, but no
# VideoWriter is ever created in this script, so it was dropped as dead code.
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

print(len(flow_dict.keys()))
cnt = 0
for key in flow_dict.keys():
    # Keys look like "<frame index>.jpg" — strip the 4-char extension.
    idx_int = int(key[:-4])
    # Two consecutive frames (idx_int and idx_int + 1) are read below, so the
    # last usable index is frame_count - 2. The original `>= frame_count`
    # guard let the second read fail and crash cv2.resize with None.
    if idx_int >= frame_count - 1:
        continue
    cap.set(cv2.CAP_PROP_POS_FRAMES, idx_int)

    ret, cur_img = cap.read()
    if not ret:
        # Seek/decode can fail even within the reported frame count.
        continue
    # One resize is enough: a color copy for drawing, a gray version for flow.
    cur_img_bgr = cv2.resize(cur_img, (224, 224))
    cur_img = cv2.cvtColor(cur_img_bgr, cv2.COLOR_BGR2GRAY)

    ret, next_img = cap.read()
    if not ret:
        continue
    next_img = cv2.resize(next_img, (224, 224))
    next_img = cv2.cvtColor(next_img, cv2.COLOR_BGR2GRAY)

    # Recompute dense Farneback flow to compare against the stored one.
    cal_flow = cv2.calcOpticalFlowFarneback(cur_img, next_img, None, 0.5, 3, 15, 3, 5, 1.2, 0)
    dict_flow = flow_dict[key]

    # Line-segment visualization: sample the flow field on a sparse grid and
    # draw a short segment from each sample point along its flow vector.
    line_step = 10
    h, w = cur_img.shape[:2]
    y, x = np.mgrid[line_step / 2:h:line_step, line_step / 2:w:line_step].reshape(2, -1).astype(int)
    fx, fy = cal_flow[y, x].T
    lines = np.vstack([x, y, x + fx, y + fy]).T.reshape(-1, 2, 2)
    lines = np.int32(lines)
    # polylines accepts a sequence of point arrays directly — no need for the
    # original element-by-element copy loop.
    cv2.polylines(cur_img_bgr, list(lines), 0, (0, 255, 255))
    cv2.imshow('flow', cur_img_bgr)
    if cv2.waitKey(1) == 27:  # ESC aborts the comparison loop
        break

    # Mean absolute difference between recomputed and stored flow fields.
    diff = np.average(np.abs(cal_flow - dict_flow))
    print(cnt, diff)
    cnt += 1

# Release the capture and close the preview window when done.
cap.release()
cv2.destroyAllWindows()
    
    

