
import cv2 
import os
import glob
import numpy as np
import scipy as sp
from matplotlib import pyplot as plt

# Load every PNG frame of the input set in deterministic (sorted) order.
# Fixes: pattern was "*" + "png" (== "*png"), which also matches names that
# merely end in "png" with no extension dot; cv2.imread returns None for
# unreadable files, which would crash downstream processing, so drop those.
images = [img for img in
          (cv2.imread(path) for path in
           sorted(glob.glob(os.path.join("Images/Input_set3", "*.png"))))
          if img is not None]

# Parameters for Lucas-Kanade sparse optical flow (cv2.calcOpticalFlowPyrLK).
lk_params = dict(winSize=(100, 100),
                 maxLevel=1,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

# Use the middle frame as the alignment reference and remove it from the
# list of frames that get warped onto it (it is re-inserted after warping).
mid = len(images) // 2  # floor division instead of int(len/2)
ref_image = images[mid]
ref_image_edge = cv2.Canny(ref_image, 100, 200)
# BUG FIX: was cv2.cv2.cvtColor — the cv2.cv2 alias does not exist in
# current opencv-python releases and raises AttributeError.
ref_image_gray = cv2.cvtColor(ref_image.astype("uint8"), cv2.COLOR_BGR2GRAY)
del images[mid]

# HSV canvas used only for visualizing the dense flow field
# (saturation channel pinned to full).
hsv = np.zeros_like(ref_image)
hsv[..., 1] = 255

# One warped copy of every non-reference frame, aligned to the reference.
warp = np.zeros((len(images), ref_image.shape[0], ref_image.shape[1], ref_image.shape[2]))

z = 0  # index of the frame currently being processed in the loop below

# Align every non-reference frame to the reference frame:
#   1. use the reference frame's Canny edge pixels as trackable points,
#   2. track them into the current frame with pyramidal Lucas-Kanade,
#   3. split tracked points into background (RANSAC homography inliers)
#      and foreground/obstruction (outliers),
#   4. densify the sparse background motion and warp the frame onto the
#      reference via cv2.remap.
for image in images:
     
    #find edge pixel points
    edge = cv2.Canny(image, 100, 200)
       
    # Coordinates of every edge pixel of the REFERENCE image, stored as
    # [[x, y]] (OpenCV point order) — these are the points to be tracked.
    # NOTE(review): the loop ranges over `edge` but tests ref_image_edge;
    # this assumes all frames share the reference frame's dimensions.
    p0 = []
    for i in range(len(edge)):
        for j in range(len(edge[0])):
        
            if ref_image_edge[i][j] == 255:
                     
                p0.append([[j, i]])
    
    p0 = np.array(p0)   
    
    # Disabled debug visualization of the reference edge points.
    '''edge_img = np.zeros(ref_image.shape)
    for i in range(len(p0)):
                    
                    edge_img[p0[i][0][1]][p0[i][0][0]] = 255

    cv2.imshow('canny_edge ' + str(z), edge_img)
    #cv2.imwrite('canny_edge.jpg', edge_img)
    cv2.waitKey(0) 
    cv2.destroyAllWindows()'''
    
    # calcOpticalFlowPyrLK requires float32 input points.
    p0 = p0.astype('float32')
    
    # calculate optical sparse flow
    # NOTE(review): LK is run on the binary edge maps themselves rather than
    # on the grayscale frames — unusual, presumably intentional here.
    p1, st, err = cv2.calcOpticalFlowPyrLK(ref_image_edge, edge, p0, None, **lk_params)
    
    # Select good points
    # st==1 marks successfully tracked points; the boolean indexing also
    # flattens the point arrays from (N,1,2) to (M,2).
    pt_new = p1[st==1]
    pt_old = p0[st==1]

    # Points whose track was lost (st==0); collected but currently unused.
    pt2_new = p1[st==0]
    pt2_old = p0[st==0]
    
    # Disabled debug visualization of the tracked point positions.
    '''edge_flow = np.zeros(ref_image.shape)
    for i in range(len(p1)):
            if p1[i][0][1] < ref_image.shape[0]:
                if p1[i][0][0] < ref_image.shape[1]:

                    edge_flow[p1[i][0][1].astype(int)][p1[i][0][0].astype(int)] = 255
                
    cv2.imshow('edge_flow', edge_flow)
    #cv2.imwrite('edge.jpg', edge_flow)
    cv2.waitKey(0) 
    cv2.destroyAllWindows()'''
    
    #homography- for background warping & aligning
    # mask has shape (M,1) and marks RANSAC inliers, i.e. points that follow
    # the dominant-plane (background) motion.
    homography, mask = cv2.findHomography(pt_old, pt_new, method=cv2.RANSAC, ransacReprojThreshold=1)

    # Restore the (M,1,2) point layout so the (M,1) mask indexes it cleanly.
    pt_old = pt_old[:, np.newaxis, :]
    pt_new = pt_new[:, np.newaxis, :]
    
    # Cast to int so the points can be used directly as pixel indices below.
    pt_new = pt_new.astype(int)
    pt_old = pt_old.astype(int)
    
    # RANSAC inliers follow the background homography ...
    bkg_pts_old = pt_old[mask==1]
    bkg_pts_new = pt_new[mask==1]
    
    # ... outliers belong to the foreground/obstruction layer.
    fgd_pts_old = pt_old[mask==0]
    fgd_pts_new = pt_new[mask==0]
      
    #for obstruction image warping not really necessary
    transform_matrix, mask2 = cv2.findHomography(fgd_pts_old, fgd_pts_new, method=cv2.RANSAC, ransacReprojThreshold=1)   
    
    fgd_pts_old = fgd_pts_old[:, np.newaxis, :]
    fgd_pts_new = fgd_pts_new[:, np.newaxis, :]
    
    # Keep only the inliers of the foreground homography.
    fgd_pts_old = fgd_pts_old[mask2==1]
    fgd_pts_new = fgd_pts_new[mask2==1]
    
    # Binary images with the sparse background / foreground points painted in;
    # they are the inputs to the sparse-to-dense flow interpolation below.
    bkg_image_sparse_old = np.zeros(ref_image.shape)
    bkg_image_sparse_new = np.zeros(ref_image.shape)
    fgd_image_sparse_old = np.zeros(ref_image.shape)
    fgd_image_sparse_new = np.zeros(ref_image.shape)  

    # Paint the background points.  Tracked ("new") positions may have
    # drifted outside the frame, hence the bounds check on that side only.
    for i in range(len(bkg_pts_new)):
                     
            if bkg_pts_new[i][1] < ref_image.shape[0]:
                if bkg_pts_new[i][0] < ref_image.shape[1]:
                    
                    bkg_image_sparse_new[bkg_pts_new[i][1]][bkg_pts_new[i][0]] = 255
             
            bkg_image_sparse_old[bkg_pts_old[i][1]][bkg_pts_old[i][0]] = 255
  
    # Same for the foreground/obstruction points.
    for i in range(len(fgd_pts_new)):
        
            if fgd_pts_new[i][1] < ref_image.shape[0]:
                if fgd_pts_new[i][0] < ref_image.shape[1]:
                    
                    fgd_image_sparse_new[fgd_pts_new[i][1]][fgd_pts_new[i][0]] = 255
             
            fgd_image_sparse_old[fgd_pts_old[i][1]][fgd_pts_old[i][0]] = 255

    
    # Disabled debug visualization of the sparse point images.
    '''cv2.imshow('bkg_sparse_img', bkg_image_sparse_old)
    #cv2.imwrite('bkg_sparse_img.jpg', bkg_image_sparse_old)
    cv2.waitKey(0) 
    cv2.destroyAllWindows()

    cv2.imshow('fgd_sparse_img', fgd_image_sparse_old)
    #cv2.imwrite('bkg_sparse_img.jpg', fgd_image_sparse_old)
    cv2.waitKey(0) 
    cv2.destroyAllWindows()'''

    # Interpolate a dense background flow field from the sparse point images
    # (cv2.optflow is opencv-contrib).  Result is float32 with shape (H, W, 2).
    flow = cv2.optflow.calcOpticalFlowSparseToDense(bkg_image_sparse_old.astype('uint8'), bkg_image_sparse_new.astype('uint8'), None, 8, 128, 0.05, True, 500.0, 1.5)

    # Flow-to-color visualization: angle -> hue, magnitude -> value.
    mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
    hsv[...,0] = ang*180/np.pi/2
    hsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
    rgb = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)

    d_flow = cv2.applyColorMap(rgb, cv2.COLORMAP_HSV)
    
    '''cv2.imshow('frame2',d_flow)
    #cv2.imwrite("incorrect_dense_flow.jpg", d_flow)
    cv2.waitKey(0) 
    cv2.destroyAllWindows()'''
      
    # Turn the relative flow into an absolute sampling map for cv2.remap:
    # negate it (remap pulls pixels from the source) and add the pixel grid.
    h, w = flow.shape[:2]
    flow = -flow
    flow[:,:,0] += np.arange(w)
    flow[:,:,1] += np.arange(h)[:,np.newaxis]
   
    # Warp the current frame onto the reference frame's coordinate system.
    warp[z] = cv2.remap(image, flow, None, interpolation=cv2.INTER_LINEAR)

    '''#warp[z] = cv2.warpPerspective(image, homography, (ref_image.shape[1], ref_image.shape[0]))
   
    #cv2.imwrite('warps_tm' + str(z) + '.jpg', warp[z])
    #cv2.imshow('warp:' + str(z), warp[z].astype("uint8"))
    #cv2.waitKey(0) 
    #cv2.destroyAllWindows()'''

    z=z+1

# Re-insert the untouched reference frame at its original position so `warp`
# now holds the complete aligned sequence.
warp = np.insert(warp, [mid], [ref_image], axis = 0)

# Min pixel value across the aligned stack: for reflection scenes the
# background at each pixel/channel is the darkest value observed over all
# frames.  Zero-valued pixels (holes left by the warp) are excluded so they
# cannot win the minimum; a pixel that is zero in every frame stays at 255,
# exactly as the original 255-initialized accumulator behaved.
# Vectorized replacement for the original O(H*W*C*N) Python quadruple loop.
background_img = np.where(warp > 0.0, warp, 255.0).min(axis=0)
                    
# Mean pixel value for obstruction images
# NOTE(review): the triple-quoted block below is the disabled alternative
# pipeline for the occlusion/obstruction case (mean background + alpha
# matte).  It is a bare string literal — evaluated and discarded at runtime —
# kept here for reference only.
'''background_img = np.mean(warp, axis = 0 )

gray_background = cv2.cvtColor(background_img.astype("uint8"), cv2.COLOR_BGR2GRAY)

alpha_map = np.zeros((ref_image.shape[0], ref_image.shape[1]))
diff = np.zeros(gray_background.shape)
#for image in range(len(images)):
for i in range(background_img.shape[0]):
       for j in range(background_img.shape[1]):
                
                diff[i][j] = gray_background[i][j] - ref_image_gray[i][j]

                if diff[i][j] > .1:
    
                    alpha_map[i][j] = 1 
        
print("bkg:", background_img)
print("alpha:", alpha_map)

#alpha = np.insert( )
alpha_map = cv2.cvtColor(alpha_map.astype("uint8"), cv2.COLOR_GRAY2BGR)
    
#cv2.imwrite("background(fgdR5-100-1-1).jpg",background_img)
cv2.imshow('background_img', background_img.astype("uint8"))
cv2.waitKey(0) 
cv2.destroyAllWindows()

background_img = background_img * alpha_map'''

# Show the recovered background layer.
cv2.imshow('background_img.jpg', background_img.astype("uint8"))
#cv2.imwrite('background_img.jpg', background_img.astype("uint8"))
cv2.waitKey(0) 
cv2.destroyAllWindows()

# Reflection layer = input frame minus recovered background.
# BUG FIX: clip to [0, 255] before the uint8 cast — a plain cast wraps
# negative differences modulo 256 and produces bright speckle artifacts.
reflection_img = np.clip(ref_image.astype(np.float64) - background_img, 0, 255)

cv2.imshow('reflection_img.jpg', reflection_img.astype("uint8"))
#cv2.imwrite('reflection_img.jpg', reflection_img.astype("uint8"))
cv2.waitKey(0) 
cv2.destroyAllWindows()
    



