from genericpath import exists
import numpy as np
import cv2
import time
from utils2 import AverageMeter

# --- Configuration -------------------------------------------------------
roundx = 2           # experiment round index, substituted into the paths below
start_frame = 10723  # frame index the capture seeks to before processing
# Source video for this round.
video_path = "E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round%d/round%d.avi"%(roundx,roundx)
# Output path for raw optical-flow data (writer currently commented out below).
flow_data_path = "E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round%d/round%d_optFlowData.avi"%(roundx,roundx)
# Output path for the flow visualisation clip (writer currently commented out below).
flow_vis_path = "E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round2/opticalFlowSample/%d.avi"%start_frame



# Optical-flow algorithm: 'FB' = dense Farneback, 'LK' = sparse Lucas-Kanade.
# method = 'LK' # FB or LK
method = 'FB' # FB or LK
# Visualisation style for the FB branch: 'hsv' colour wheel or sampled 'line' vectors.
vis_method = 'line' # hsv or line
# vis_method = 'hsv'
line_step= 10 # pixel spacing between drawn flow vectors in 'line' mode


# Open the source video and jump straight to the frame of interest.
cap = cv2.VideoCapture(video_path)
cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)

# Codec and geometry for the (currently disabled) output writers.
fourcc = cv2.VideoWriter_fourcc(*'XVID')
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# flow_data_video = cv2.VideoWriter(flow_data_path,fourcc,30.0,(width,height))
# flow_vis_video = cv2.VideoWriter(flow_vis_path,fourcc,5,(448,224))

if method == 'LK':
    # Sparse optical flow: track Shi-Tomasi corners frame-to-frame with
    # pyramidal Lucas-Kanade, drawing a motion line per tracked point.
    feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7)
    lk_params = dict(winSize=(15, 15), maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    ret, old_frame = cap.read()
    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
    # One random BGR colour per tracked point. At most maxCorners points are
    # ever drawn, so size the palette accordingly (the original allocated
    # width*height rows, almost all unused).
    color = np.random.randint(0, 255, (feature_params['maxCorners'], 3))

    # NOTE(review): the original also built a dense per-pixel p0 grid here,
    # but it was overwritten by goodFeaturesToTrack on every loop iteration
    # before use (and its row-major index used height instead of width as
    # the stride) — removed as dead code.

    while True:
        ret, frame = cap.read()
        if frame is None:
            break
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Re-detect good features on the previous frame each iteration
        # (tracking-forward via `p0 = good_new` is left disabled, as in the
        # original script).
        p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
        mask = np.zeros_like(old_frame)
        if p0 is None:
            # No trackable corners in this frame; advance and keep going.
            old_gray = frame_gray.copy()
            continue

        # Locate the corresponding feature points in the current frame.
        p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
        good_new = p1[st == 1]
        good_old = p0[st == 1]

        for i, (new, old) in enumerate(zip(good_new, good_old)):
            # ravel() yields float32 coordinates; cv2.line/cv2.circle require
            # integer pixel points (OpenCV >= 4.x raises otherwise).
            a, b = (int(v) for v in new.ravel())
            c, d = (int(v) for v in old.ravel())
            mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
            frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
            # frame = cv2.putText(frame, '%.2f'%err[i], (a,b),0,0.7,(0,0,0),2)
        img = cv2.add(frame, mask)

        cv2.imshow('frame', img)
        k = cv2.waitKey(0)   # step frame-by-frame; ESC quits
        if k == 27:
            break
        old_gray = frame_gray.copy()
        # p0 = good_new.reshape(-1,1,2)

    cv2.destroyAllWindows()
    cap.release()



if method == 'FB':
    # Dense optical flow (Gunnar Farneback) on frames resized to 224x224,
    # visualised either as an HSV colour wheel or as sampled flow vectors.
    ret, prev = cap.read()
    prev = cv2.resize(prev, (224, 224))
    prevgray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)

    # HSV canvas for 'hsv' mode: saturation fixed at 255,
    # hue encodes flow direction, value encodes flow magnitude.
    hsv = np.zeros_like(prev)
    hsv[..., 1] = 255
    process_time = AverageMeter()

    while True:
        start = time.time()
        ret, img = cap.read()
        if img is None:
            # End of video — the original crashed in cv2.resize here.
            break
        img = cv2.resize(img, (224, 224))
        fno = cap.get(cv2.CAP_PROP_POS_FRAMES)
        # if fno == start_frame + 100:
        #     flow_data_video.release()
        #     flow_vis_video.release()
        #     exit(0)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Dense flow with shape (h, w, 2): per-pixel (dx, dy) displacement.
        flow = cv2.calcOpticalFlowFarneback(prevgray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
        # flow_data_video.write(flow)
        prevgray = gray

        if vis_method == 'hsv':
            # Convert Cartesian flow to polar form: magnitude and angle.
            mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
            hsv[..., 0] = ang * 180 / np.pi / 2  # angle -> OpenCV hue (0-179)
            hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
            bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
            cv2.imshow('frame2', bgr)

        if vis_method == 'line':
            # Per-pixel flow magnitude, normalised to 0-255 for display.
            flow_mod = np.sqrt(np.power(flow[:, :, 0], 2) + np.power(flow[:, :, 1], 2))
            max_mod = np.max(flow_mod)
            if max_mod > 0:
                flow_mod = ((flow_mod / max_mod) * 255).astype(np.uint8)
            else:
                # All-zero flow: avoid division by zero on a static frame.
                flow_mod = flow_mod.astype(np.uint8)
            cv2.imshow('flow mod', flow_mod)

            # Sample the flow field every `line_step` pixels and draw each
            # sampled vector as a short line segment.
            h, w = gray.shape[:2]
            y, x = np.mgrid[line_step / 2:h:line_step, line_step / 2:w:line_step].reshape(2, -1).astype(int)
            fx, fy = flow[y, x].T
            lines = np.vstack([x, y, x + fx, y + fy]).T.reshape(-1, 2, 2)
            lines = np.int32(lines)
            # A magnitude filter was disabled in the original, so all segments
            # are drawn.
            cv2.polylines(img, list(lines), 0, (0, 255, 255))

        # flow_vis_video.write(img)
        cv2.imshow('flow', img)

        if vis_method == 'line':
            # Side-by-side view: annotated frame | 3-channel magnitude image.
            # Guarded by mode because flow_mod only exists in 'line' mode —
            # the original raised NameError here under 'hsv'.
            flow_mod = np.concatenate((flow_mod[:, :, None],) * 3, axis=2)
            merge = np.concatenate((img, flow_mod), axis=1)
            # flow_vis_video.write(merge)
            # if fno > start_frame + 50:
            #     flow_vis_video.release()
            #     exit(0)
            cv2.imshow('merge', merge)

        ch = cv2.waitKey(0)  # step frame-by-frame; ESC quits
        if ch == 27:
            break
        process_time.update(time.time() - start)
        print('Average processing time %.5f' % (process_time.avg))
    cv2.destroyAllWindows()
    