# import packages
import matplotlib.pyplot as plt
import matplotlib.image as mping
import numpy as np
import cv2


def abs_sobel_thresh(image, orient='x', sobel_kernel=3, thresh=(0, 255)):
    """Threshold the absolute Sobel gradient of an RGB image.

    Parameters:
        image: RGB image (H, W, 3).
        orient: 'x' or 'y' — direction of the gradient.
        sobel_kernel: odd kernel size passed to cv2.Sobel.
        thresh: (low, high) inclusive window on the 0-255 scaled gradient.

    Returns:
        uint8 binary image, 1 where the scaled gradient lies in [low, high].

    Raises:
        ValueError: if orient is neither 'x' nor 'y'.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    # Gradient in the requested direction.
    if orient == 'x':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
    elif orient == 'y':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
    else:
        # BUG FIX: the original left abs_sobel undefined (NameError) for any
        # other value of orient; fail fast with a clear message instead.
        raise ValueError("orient must be 'x' or 'y', got %r" % (orient,))
    # Scale to 0-255. BUG FIX: guard against division by zero on a
    # constant (gradient-free) image — the gradient is 0 everywhere then.
    max_val = np.max(abs_sobel)
    if max_val > 0:
        scaled_sobel = np.uint8(255 * abs_sobel / max_val)
    else:
        scaled_sobel = np.zeros_like(abs_sobel, dtype=np.uint8)
    # Blank (all-black) output, then mark in-threshold pixels.
    grad_binary = np.zeros_like(scaled_sobel)
    grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
    return grad_binary

def rgb_select(img, r_thresh, g_thresh, b_thresh):
    """Binary mask of pixels whose R, G and B values all fall in their windows.

    Each threshold is a (low, high) pair; a channel passes when
    low < value <= high. Returns an array of the same dtype as one
    channel, 1 where all three channels pass, 0 elsewhere.
    """
    channels = (img[:, :, 0], img[:, :, 1], img[:, :, 2])
    windows = (r_thresh, g_thresh, b_thresh)
    # Boolean pass/fail mask per channel.
    passes = [(ch > lo) & (ch <= hi) for ch, (lo, hi) in zip(channels, windows)]
    combined = np.zeros_like(channels[0])
    combined[passes[0] & passes[1] & passes[2]] = 1
    return combined

def color_gradient_threshold(image):
    """Combine x-gradient and RGB colour thresholds into one mask image.

    Returns an image shaped like the input, 255 (white, all channels)
    wherever either the gradient mask or the colour mask fires, 0 elsewhere.
    """
    # x-direction Sobel gradient mask with a 15x15 kernel.
    grad_mask = abs_sobel_thresh(image, orient='x', sobel_kernel=15, thresh=(50, 180))
    # Colour mask tuned for bright red/yellow-ish lane paint.
    color_mask = rgb_select(image, r_thresh=(225, 255), g_thresh=(180, 255), b_thresh=(0, 255))
    result = np.zeros_like(image)
    result[(grad_mask == 1) | (color_mask == 1)] = 255
    return result


def region_of_interest(img, vertices):
    """Keep only the pixels of *img* inside the given polygon; zero the rest.

    Parameters:
        img: image to mask (the mask is filled on all channels).
        vertices: int32 array of polygon corner points (N, 2).

    Returns:
        The masked image.
    """
    mask = np.zeros_like(img)
    # Fill the polygon white so the AND below keeps pixels inside it.
    cv2.fillPoly(mask, [vertices], [255, 255, 255])
    # BUG FIX: removed the leftover debug print that fired on every frame.
    return cv2.bitwise_and(img, mask)

def perspective_transform(image):
    """Warp the road image to a top-down (bird's-eye) view.

    Uses a hand-picked quadrilateral on the source frame and a rectangle
    in the destination. Returns (warped_image, inverse_matrix) where the
    inverse matrix maps the bird's-eye view back to the camera view.
    """
    # Source quad (x, y), ordered to match the destination points below.
    # Note: the two y=720 points are x=1096 (right edge) and x=200 (left).
    src = np.float32([
        [580, 460],
        [700, 460],
        [1096, 720],
        [200, 720],
    ])
    # Destination rectangle in the warped image.
    dst = np.float32([
        [300, 0],
        [950, 0],
        [950, 720],
        [300, 720],
    ])
    size = (image.shape[1], image.shape[0])
    forward = cv2.getPerspectiveTransform(src, dst)
    inverse = cv2.getPerspectiveTransform(dst, src)
    warped = cv2.warpPerspective(image, forward, size)
    return warped, inverse

def histogram_img(image):
    """Column histogram of lit pixels in the first channel of *image*.

    A pixel counts as lit when image[:, :, 0] > 0. Returns a 1-D integer
    array of length image.shape[1] with the per-column counts.
    """
    lit = (image[:, :, 0] > 0).astype(np.int_)
    return np.sum(lit, axis=0)

def lane_position(histogram):
    """Locate the strongest column on each half of a column histogram.

    Returns [[left_index, left_value], [right_index, right_value]].
    A half whose maximum value is not positive yields [0, 0], matching
    the original "never updated" behaviour.
    """
    mid = int(histogram.shape[0] / 2)
    left_half = histogram[:mid]
    right_half = histogram[mid:]

    # np.argmax returns the first maximum, same as the original strict '>' scan.
    li = int(np.argmax(left_half))
    left_point = [li, left_half[li]] if left_half[li] > 0 else [0, 0]

    ri = int(np.argmax(right_half))
    if right_half[ri] > 0:
        right_point = [mid + ri, right_half[ri]]
    else:
        right_point = [0, 0]

    return [left_point, right_point]

def sliding_window(image,lanes_pos):
    """Collect lane pixels with a sliding-window search up each lane line.

    Parameters:
        image: bird's-eye binary/mask image; NOTE it is mutated in place
               (cv2.rectangle draws the search windows onto it).
        lanes_pos: [[left_x, left_val], [right_x, right_val]] as returned
                   by lane_position() — the starting window centres.

    Returns:
        [image, left_x, left_y, right_x, right_y] where the x/y arrays are
        the pixel coordinates assigned to the left and right lane.
    """
    # starting window centres, taken from the histogram peaks.
    left_x_current = lanes_pos[0][0]
    right_x_current = lanes_pos[1][0]
    nWindows = 10
    # each window covers 1/nWindows of the image height.
    window_height = np.int_(image.shape[0]//nWindows)
    # half-width of each search window, in pixels.
    window_width = 80
    # coordinates of all non-zero pixels in the input image.
    nonzero = image.nonzero() 
    nonzero_y = nonzero[0]
    nonzero_x = nonzero[1]
    #
    # create a empty list to receive left/right line pixel.
    left_lane_inds = []
    right_lane_inds = []
    #
    out_img = np.dstack((image,image,image))
    #print("out_img: ",out_img)
    # walk the windows from the bottom of the image to the top.
    for window in range(nWindows):
        # vertical extent of this window (y grows downward).
        win_y_top = image.shape[0] - (window +1)*window_height
        win_y_bottom = image.shape[0] - window*window_height
        # horizontal extent around the current left/right centres.
        win_x_left_left = left_x_current - window_width
        win_x_left_right = left_x_current + window_width 
        win_x_right_left = right_x_current - window_width
        win_x_right_right = right_x_current + window_width
        # draw the left and right search windows onto the image
        # (visualisation only; mutates the caller's image).
        cv2.rectangle(image,(win_x_left_left,win_y_top),(win_x_left_right,win_y_bottom),(0,255,0),2)
        cv2.rectangle(image,(win_x_right_left,win_y_top),(win_x_right_right,win_y_bottom),(0,255,0),2)
        # indices (into nonzero_x/nonzero_y) of the lit pixels inside each window.
        good_left_inds = ((nonzero_y >= win_y_top)&(nonzero_y < win_y_bottom)&(nonzero_x >= win_x_left_left)&(nonzero_x < win_x_left_right)).nonzero()[0]
        good_right_inds = ((nonzero_y >= win_y_top)&(nonzero_y < win_y_bottom)&(nonzero_x >= win_x_right_left)&(nonzero_x < win_x_right_right)).nonzero()[0]
        #print(good_left_inds)
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        #
        #print("nonzero_x_left:",nonzero_x[good_left_inds])
        #print("non_zero_x_right:",nonzero_x[good_right_inds])
        # re-centre the next window on the mean x of the pixels found,
        # but only when enough (>50) pixels were captured.
        if len(good_left_inds)>50:
            left_x_current = np.int_(np.mean(nonzero_x[good_left_inds]))
        if len(good_right_inds)>50:
            right_x_current = np.int_(np.mean(nonzero_x[good_right_inds]))
    # end of the window loop.
    #print("left_lane_inds",left_lane_inds)
    # flatten the per-window index lists into one index array per lane.
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    #print("left_lane_inds",left_lane_inds)
    # pixel coordinates assigned to each lane line.
    left_x = nonzero_x[left_lane_inds]
    left_y = nonzero_y[left_lane_inds]
    right_x = nonzero_x[right_lane_inds]
    right_y = nonzero_y[right_lane_inds]
    #plt.plot(left_x,left_y,color='yellow')
    #plt.plot(right_x,right_y,color='red')
    #
    results = [image,left_x,left_y,right_x,right_y]
    #print("sliding windows results: ",results)
    return results

def fit_polynominal(img_sliding_window):
    """Fit a 2nd-order polynomial x = a*y**2 + b*y + c to each lane's pixels.

    Parameters:
        img_sliding_window: [image, left_x, left_y, right_x, right_y]
                            as returned by sliding_window().

    Returns:
        [left_fitx, right_fitx, ploty] — the fitted x positions for both
        lanes evaluated at every image row y in ploty.
    """
    image, left_x, left_y, right_x, right_y = img_sliding_window
    # polyfit returns [a, b, c], highest power first.
    left_coeffs = np.polyfit(left_y, left_x, 2)
    right_coeffs = np.polyfit(right_y, right_x, 2)
    # One sample per image row.
    ploty = np.linspace(0, image.shape[0] - 1, image.shape[0])
    # polyval evaluates a*y**2 + b*y + c for every y.
    left_fitx = np.polyval(left_coeffs, ploty)
    right_fitx = np.polyval(right_coeffs, ploty)
    return [left_fitx, right_fitx, ploty]

def drawing_poly(img_ori, img_fit):
    """Paint the region between the two fitted lane lines green.

    Parameters:
        img_ori: reference image — only its shape is used for the canvas.
        img_fit: [left_fitx, right_fitx, ploty] from fit_polynominal().

    Returns:
        A black image of img_ori's shape with the lane polygon filled (0, 255, 0).
    """
    left_fitx, right_fitx, ploty = img_fit
    canvas = np.zeros_like(img_ori)
    # Left boundary runs top->bottom; the right boundary is reversed so
    # the concatenated points trace one closed polygon.
    left_pts = np.column_stack((left_fitx, ploty))
    right_pts = np.column_stack((right_fitx, ploty))[::-1]
    polygon = np.vstack((left_pts, right_pts))
    return cv2.fillPoly(canvas, np.int_([polygon]), (0, 255, 0))

def drawing_poly_perspective_back(img_ori, img_fit, matrix_K_back):
    """Draw the lane polygon and warp it back to the camera perspective.

    Parameters:
        img_ori: original camera frame — defines the output size.
        img_fit: [left_fitx, right_fitx, ploty] from fit_polynominal().
        matrix_K_back: inverse perspective matrix from perspective_transform().

    Returns:
        The green lane-polygon mask warped back into the camera view.
    """
    # FIX: delegate to drawing_poly instead of duplicating ~20 lines of
    # identical polygon-construction code.
    img_mask = drawing_poly(img_ori, img_fit)
    img_size = (img_ori.shape[1], img_ori.shape[0])
    return cv2.warpPerspective(img_mask, matrix_K_back, img_size)

if __name__ == "__main__":
    # Video input.
    video_input = "road_video.mp4"
    cap = cv2.VideoCapture(video_input)
    # Output writer: fixed 1280x720 @ 20 fps — frames written to it are
    # assumed to match this size (presumably the source video is 720p).
    video_output = "road_video_output_v2.mp4"
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    width = 1280
    height = 720
    fps = 20
    video_out = cv2.VideoWriter(video_output, fourcc, fps, (width, height))
    # Frame-counter overlay settings.
    content = "this is frame: "
    pos = (64, 90)
    color = (0, 255, 0)
    font = cv2.FONT_HERSHEY_SIMPLEX
    weight = 2
    size = 1
    count = 0

    # Process the video frame by frame.
    while True:
        ret, frame = cap.read()
        if not ret:
            # cap.read() returns False both at end of stream and on a
            # genuine read failure — the original message claimed "error"
            # even for a normal end of video.
            print("end of video (or read failure), exiting...")
            break
        if cv2.waitKey(25) & 0xFF == ord('q'):
            print(" you quit the program by clicking 'q'...")
            break
        image = frame
        # Threshold, then keep a triangular region of interest
        # (bottom corners of the frame up to an apex at y=420).
        img_color = color_gradient_threshold(image)
        left_bottom = [0, img_color.shape[0]]
        right_bottom = [img_color.shape[1], img_color.shape[0]]
        apex = [img_color.shape[1] / 2, 420]
        vertices = np.array([left_bottom, right_bottom, apex], np.int32)
        img_interest = region_of_interest(img_color, vertices)
        # Bird's-eye view, lane search, and polynomial fit.
        img_perspective, matrix_K_back = perspective_transform(img_interest)
        img_histogram = histogram_img(img_perspective)
        lanes_pos = lane_position(img_histogram)
        img_sliding_window = sliding_window(img_perspective, lanes_pos)
        img_fit_list = fit_polynominal(img_sliding_window)
        # Warp the lane polygon back and blend it over the frame.
        img_mask_back = drawing_poly_perspective_back(image, img_fit_list, matrix_K_back)
        results = cv2.addWeighted(image, 1, img_mask_back, 0.3, 0)
        results_show = cv2.resize(results, (640, 480))
        cv2.imshow("frame", results_show)
        # Stamp the frame number onto the full-size frame that gets written.
        contents = content + str(count)
        cv2.putText(results, contents, pos, font, size, color, weight, cv2.LINE_AA)
        video_out.write(results)
        count += 1
    cap.release()
    # BUG FIX: the writer was never released, so the output file could be
    # left unfinalized/corrupt; release() flushes and closes it.
    video_out.release()
    cv2.destroyAllWindows()
