import rospy
import cv2
import numpy as np
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from message_filters import ApproximateTimeSynchronizer, Subscriber
import os
import os.path as osp
import sys
import copy
import argparse


# Single module-level bridge, shared by all callbacks, for ROS <-> OpenCV image conversion.
bridge = CvBridge()


def mkdir_or_exist(dir_name, mode=0o777):
    """Create *dir_name* (including parents) with *mode*; no-op if the path already exists."""
    expanded = osp.expanduser(dir_name)
    if osp.exists(expanded):
        return
    os.makedirs(expanded, mode)

def generate_bev_ground_points(x_min, x_max, y_min, y_max, voxel_size):
    """Build a flat (z = 0) bird's-eye-view grid of 3-D points.

    Args:
        x_min, x_max: forward-axis range [x_min, x_max), sampled every voxel_size.
        y_min, y_max: lateral-axis range [y_min, y_max), sampled every voxel_size.
        voxel_size: grid spacing, in the same units as the ranges.

    Returns:
        (3, N) float array: row 0 = x, row 1 = y, row 2 = 0.
        x varies slowest, matching the original nested-loop ordering.
    """
    x = np.arange(x_min, x_max, voxel_size)
    y = np.arange(y_min, y_max, voxel_size)
    # Vectorized replacement for the original Python double loop
    # (~200k iterations for the default 100 m x 40 m / 0.1 m grid).
    xx, yy = np.meshgrid(x, y, indexing="ij")
    return np.stack([xx.ravel(), yy.ravel(), np.zeros(xx.size)])


def get_lidar_rgb(img, points_org, Intrinsics, Extrinsics):
    """Colorize 3-D points by projecting them into an RGB image.

    Args:
        img: (H, W, 3) uint8 image (BGR, as produced by cv2 / cv_bridge).
        points_org: (3, N) array of points in the lidar/world frame; not modified.
        Intrinsics: (3, 3) camera matrix K.
        Extrinsics: (3, 4) or (4, 4) [R|t] transform from the point frame to the
            camera frame (only the first three rows are used).

    Returns:
        (3, N) array with the same dtype as points_org: per-point colors sampled
        from img; zeros for points outside the image or behind the camera.
    """
    points = copy.deepcopy(points_org)
    points_rgb = np.zeros_like(points)
    img_h, img_w, _ = img.shape

    # lidar/world -> camera frame: p_cam = R @ p + t
    E = Extrinsics[:3]
    rot_matrix = E[:3, :3]
    tvec = E[:3, -1]
    cam = rot_matrix @ points[:3, :] + tvec[:, None]

    # camera frame -> homogeneous pixel coordinates through K
    proj = np.dot(Intrinsics, cam)[:3, :]
    depth = proj[2, :]

    # FIX: only points strictly in front of the camera (z > 0) project to a real
    # pixel. The original divided unconditionally, so z <= 0 points could wrap
    # around into valid pixel coordinates and pick up bogus colors (and z == 0
    # divided by zero).
    front = depth > 0
    uv = np.zeros((2, depth.shape[0]), dtype=np.int_)
    uv[:, front] = (proj[:2, front] / depth[front]).astype(np.int_)

    # Keep the original bounds semantics: row/column 0 are excluded (<= 0).
    u, v = uv[0], uv[1]
    inside = front & (u > 0) & (u < img_w) & (v > 0) & (v < img_h)
    # img indexing is [row, col] = [v, u]; transpose (M, 3) -> (3, M) columns.
    points_rgb[:, inside] = img[v[inside], u[inside]].T
    return points_rgb


class CameraSync:
    """Synchronize multiple ROS camera topics into a single callback.

    Depending on the flags the node can:
      * record: save every 10th synchronized frame set to save_dir/<camera>/<stamp>.jpg
      * debug:  show all frames tiled into one 2x4 mosaic window
      * eval:   project a flat BEV ground grid into the image with the per-camera
                calibration (K from YAML, E from txt) and visualize the colorized
                points with Open3D

    NOTE: the constructor blocks in rospy.spin() and never returns.
    """

    def __init__(self, camera_type=None, camera_calib_E_dir="", save_dir=None,
                 record=False, eval=False, debug=True):
        """
        Args:
            camera_type: iterable of camera names; each subscribes to /senyun/<name>.
            camera_calib_E_dir: calibration root containing K/<name>.yaml and
                E/<name>_result.txt.
            save_dir: output root for recorded frames (None disables saving).
            record: save every 10th synchronized frame set.
            eval: run the calibration-evaluation projection/visualization.
            debug: show the tiled preview window.
        """
        rospy.init_node('camera_sync', anonymous=True)

        self.size = 1080  # nominal image height; kept for the (disabled) K rescale logic
        self.save_dir = save_dir
        self.camera_calib_E_dir = camera_calib_E_dir
        self.eval = eval
        self.debug = debug
        self.record = record
        self.count = 0

        # Flat ground grid in front of the vehicle:
        # x in [0, 100), y in [-20, 20), 0.1 m cells.
        x_min = 0.
        x_max = 100.
        y_min = -20.
        y_max = 20.
        voxel_size = 0.1
        self.points_dim3 = generate_bev_ground_points(x_min, x_max, y_min, y_max, voxel_size)

        self.camera_type = camera_type
        self.sub_cams = []
        for name in self.camera_type:
            sub_cam = Subscriber("/senyun/" + name, Image)
            print("/senyun/" + name, sub_cam)
            self.sub_cams.append(sub_cam)

        ts = ApproximateTimeSynchronizer(self.sub_cams, queue_size=10, slop=0.1,
                                         allow_headerless=True)
        ts.registerCallback(self.callback)

        rospy.spin()

    def get_pro_m_from_KEs(self):
        """Load per-camera calibration from camera_calib_E_dir.

        Returns:
            (projections, Ks, Es): lists aligned with self.camera_type, where
            projection = K @ E[:3] is the 3x4 camera projection matrix.
        """
        projections = []
        Ks = []
        Es = []
        for c_i in self.camera_type:
            K_p = self.camera_calib_E_dir + "/K/" + c_i + ".yaml"
            fs = cv2.FileStorage(K_p, cv2.FileStorage_READ)
            K = fs.getNode('K').mat()
            D = fs.getNode('D').mat()  # distortion coefficients (loaded but unused here)

            E_p = self.camera_calib_E_dir + "/E/" + c_i + "_result.txt"
            E = np.loadtxt(E_p)
            projection = np.dot(K, E[:3, ])
            projections.append(projection)
            Es.append(E)
            Ks.append(K)
        return projections, Ks, Es

    def callback(self, *args):
        """Handle one synchronized set of images (one Image message per camera)."""
        imgs = []
        self.count += 1
        for i, arg in enumerate(args):
            img = bridge.imgmsg_to_cv2(arg, "bgr8")

            if self.save_dir is not None:
                # Only every 10th synchronized set is written to disk.
                if self.record and self.count % 10 == 0:
                    print(self.count)
                    timestamp = str(rospy.Time().now())
                    dir_name = osp.join(self.save_dir, self.camera_type[i].split("/")[-1])
                    mkdir_or_exist(dir_name)
                    save_path = osp.join(dir_name, timestamp + ".jpg")
                    print(save_path)
                    cv2.imwrite(save_path, img)
            imgs.append(img)

        # Pad with black frames so the mosaic is always 2 rows x 4 columns.
        im_black = np.zeros_like(img)
        for _ in range(8 - len(imgs)):
            imgs.append(im_black)
        h, w, c = imgs[0].shape
        bg = np.zeros((h * 2, w * 4, c), dtype=np.uint8)
        for i in range(8):
            row = i // 4
            col = i % 4
            bg[row * h:(row + 1) * h, col * w:(col + 1) * w] = imgs[i]

        if self.debug:
            cv2.namedWindow("r", cv2.WINDOW_NORMAL)
            cv2.imshow("r", bg)
            cv2.waitKey(1)

        if self.eval:
            # FIX: the file used o3d without ever importing it. Import lazily so
            # record-only runs do not require Open3D to be installed.
            import open3d as o3d

            self.projections, self.Ks, self.Es = self.get_pro_m_from_KEs()

            # FIX: the original referenced undefined names (points_dim3, Ks, Es).
            # `img` is the frame of the LAST subscribed camera after the loop
            # above, so use that camera's calibration.
            # TODO(review): confirm whether every camera should be evaluated,
            # not just the last one.
            points_dim3 = self.points_dim3
            lidarRgb = get_lidar_rgb(img, points_dim3, self.Ks[-1], self.Es[-1])
            lidarRgb = lidarRgb.T          # (N, 3)
            points_dim3 = points_dim3.T    # (N, 3)
            print("points_dim3: ", points_dim3.shape)
            print("lidarRgb: ", lidarRgb.shape)

            # NOTE(review): this appends one extra all-ones ROW (an extra point),
            # not a per-point homogeneous coordinate — kept as in the original.
            one_p = np.array([np.ones_like(points_dim3[0])])
            points_dim3 = np.vstack([points_dim3, one_p])
            lidarRgb = np.vstack([lidarRgb, one_p])
            # Columns 0:3 = xyz, columns 3:6 = rgb.
            final_points = np.concatenate((points_dim3, lidarRgb), axis=1)
            print("final_points: ", final_points.shape)

            vis = o3d.visualization.Visualizer()
            vis.create_window()
            points_voxel_o3d = o3d.geometry.PointCloud()
            points_voxel_o3d.points = o3d.utility.Vector3dVector(final_points[:, :3])
            points_voxel_o3d.colors = o3d.utility.Vector3dVector(final_points[:, 3:] / 255)
            vis.add_geometry(points_voxel_o3d)

            frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=4)
            vis.add_geometry(frame)
            render_option = vis.get_render_option()
            render_option.point_size = 2
            render_option.background_color = np.asarray([1, 1, 1])
            vis.run()
            vis.destroy_window()


if __name__ == '__main__':
    # CLI entry point: either record camera frames or evaluate camera calibration.
    parser = argparse.ArgumentParser(description='camera save or calib_eval')

    parser.add_argument('-d', '--debug', action='store_true', help='debug action')
    parser.add_argument('-e', '--eval', action='store_true', help='eval action')
    parser.add_argument('-r', '--record', action='store_true', help='record action')
    parser.add_argument('-t', '--camera_tops', default="/camera/top_right")
    parser.add_argument('-a', '--use_all_camera_tops', action='store_true')
    parser.add_argument('-c', '--camera_E_dir', default=None)
    parser.add_argument('-s', '--save_dir', default=None)

    args = parser.parse_args()

    debug_mode = args.debug
    eval_mode = args.eval  # renamed from `eval` to avoid shadowing the builtin
    record = args.record
    camera_E_dir = args.camera_E_dir
    save_dir = args.save_dir
    camera_tops = args.camera_tops
    use_all_camera_tops = args.use_all_camera_tops

    print('debug:', debug_mode)
    print('eval:', eval_mode)
    print('record:', record)
    print('camera_E_dir:', camera_E_dir)
    print('save_dir:', save_dir)
    print('use_all_camera_tops:', use_all_camera_tops)

    print("************************************ ")

    if eval_mode:
        print("this is eval camera calib program >>> ")
        if camera_E_dir is None:
            print("please set camera  E dir !!!")
            sys.exit()
        print("make sure start all camera topics !!!")
        camera_tops = ['front_left', 'front_right', 'front_mid', 'top_left',
                       'top_right', 'rear_left', 'rear_right']
        print("camera_tops: ", camera_tops)

    if record:
        print("this is record mono camera img program >> ")
        if save_dir is None:
            # Deliberately best-effort: warn but keep running (saving is skipped
            # in the callback when save_dir is None).
            print("please set save dir")

        if use_all_camera_tops:
            print("make sure start all camera topics !!!")
            camera_tops = ['front_left', 'front_right', 'front_mid', 'top_left',
                           'top_right', 'rear_left', 'rear_right']
            print("camera_tops: ", camera_tops)
        else:
            camera_tops = [camera_tops]
            print("there is only one camera_tops: ", camera_tops)
            print('save_dir:', save_dir)

    # FIX: CameraSync iterates camera_tops. If neither branch above rewrote it
    # (e.g. debug-only runs), it was still the raw argparse string and the node
    # would subscribe to one topic per *character*. Normalize to a list.
    if isinstance(camera_tops, str):
        camera_tops = [camera_tops]

    try:
        CameraSync(camera_tops, camera_E_dir, save_dir, record, eval_mode, debug_mode)
    except rospy.ROSInterruptException:
        pass
