import rospy
import cv2
import numpy as np
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from message_filters import ApproximateTimeSynchronizer, Subscriber
import os
import os.path as osp
import sys
import torch
import argparse
# import open3d as o3d
from torch import Tensor


# Shared ROS <-> OpenCV converter. Currently only referenced from
# commented-out code paths (imgmsg_to_cv2 / cv2_to_imgmsg in the callback);
# the live callback reads the raw message buffer directly instead.
bridge = CvBridge()


def showImg(img, wati_time=0, win_name="r"):
    """Show *img* in a resizable window and block until a key is pressed.

    Waits `wati_time` ms (0 = forever). Pressing 'q' terminates the whole
    program via sys.exit(); any other key code (masked to 8 bits) is returned.
    """
    cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
    cv2.imshow(win_name, img)
    pressed = cv2.waitKey(wati_time) & 0xFF
    if pressed == ord('q'):
        sys.exit()
    return pressed


@torch.no_grad()
def get_points(n_voxels: Tensor, voxel_size: Tensor, origin: Tensor):
    """Build a dense grid of 3-D coordinates for a voxel volume.

    Cell (0, 0, 0) sits at ``origin - n_voxels / 2 * voxel_size`` and
    consecutive cells along each axis are ``voxel_size`` apart.

    Args:
        n_voxels: number of voxels per axis, shape [3].
        voxel_size: metric size of one voxel per axis, shape [3].
        origin: metric center of the covered volume, shape [3].

    Returns:
        Tensor of shape [3, n_voxels[0], n_voxels[1], n_voxels[2]] holding
        the (x, y, z) coordinate of every grid cell.
    """
    axes = [torch.arange(n_voxels[d]) for d in range(3)]
    index_grid = torch.stack(torch.meshgrid(axes))
    grid_origin = origin - n_voxels / 2.0 * voxel_size
    scale = voxel_size.view(-1, 1, 1, 1)
    offset = grid_origin.view(-1, 1, 1, 1)
    return index_grid * scale + offset

def generate_bev_ground_points(point_cloud_range, bev_size):
    """Create a flat grid of ground-plane points covering *point_cloud_range*.

    Args:
        point_cloud_range: [x_min, y_min, z_min, x_max, y_max, z_max] in
            meters, e.g. [0, -30, 0, 60, 30, 1].
        bev_size: voxel counts per axis, e.g. [600, 600, 1]; must have 3
            entries (the z count is typically 1 for a ground plane).

    Returns:
        float32 Tensor of shape [3, bev_size[0], bev_size[1], bev_size[2]]
        with the (x, y, z) coordinate of every BEV cell.
    """
    n_voxels = bev_size

    # Metric size of one voxel along each axis.
    voxel_size = torch.tensor([
        (point_cloud_range[3] - point_cloud_range[0]) / n_voxels[0],
        (point_cloud_range[4] - point_cloud_range[1]) / n_voxels[1],
        (point_cloud_range[5] - point_cloud_range[2]) / n_voxels[2],
    ])
    print("voxel_size: ", voxel_size)

    # Center of the covered range. (The original contained a redundant
    # chained assignment `origin = origin=torch.tensor(...)`.)
    origin = torch.tensor(
        (np.array(point_cloud_range[:3]) + np.array(point_cloud_range[3:])) / 2.)
    print("origin: ", origin)

    points = get_points(  # [3, vx, vy, vz]
        n_voxels=torch.tensor(n_voxels),
        voxel_size=voxel_size,
        origin=origin,
    ).to(torch.float32)

    return points


def pcl2bev(xyz, rgb, ratio=1, width=400, height=1400, save_name=""):
    """Rasterize colored ground-plane points into a BEV image.

    Args:
        xyz: (N, 3) array; only x and y are used — the third column is
            replaced by the homogeneous 1 (a copy is taken, the caller's
            array is no longer mutated).
        rgb: (N, 3) per-point colors, R/G/B order.
        ratio: scale factor applied to width and height.
        width, height: BEV image size in pixels before scaling.
        save_name: unused here; kept for interface compatibility.

    Returns:
        uint8 BGR image of shape (ratio*width, ratio*height, 3) after the
        final flip/rotate.
    """
    img_width = int(ratio * width)
    img_height = int(ratio * height)

    # Pixel-corner <-> metric-corner correspondences: the BEV image spans
    # x in [-40, 100] m and y in [-20, 20] m on the ground plane.
    new_origin = np.array([[0, 0], [img_width, 0], [img_width, img_height], [0, img_height]])
    org_origin = np.array([[100, -20], [100, 20], [-40, 20], [-40, -20]])

    # H maps pixel coords -> metric coords; inv(H) maps metric -> pixel.
    H, _ = cv2.findHomography(new_origin, org_origin)

    # Sanity check: the metric corners must land back on the pixel corners.
    org_h = np.concatenate((org_origin, np.ones((org_origin.shape[0], 1))), axis=1).T
    new = (np.linalg.inv(H) @ org_h)[:2, ].T
    new[new < 0.1] = 0  # clamp numerical noise around zero
    print(new.T.tolist())

    # Homogenize on a copy (the original wrote xyz[:, 2] = 1 in place,
    # silently mutating the caller's array).
    pts = xyz.copy()
    pts[:, 2] = 1
    uv = (np.linalg.inv(H) @ pts.T)[:2, ].T

    # np.int was removed in NumPy 1.24 — use a concrete integer dtype.
    uvrgb = np.round(np.concatenate((uv, rgb), axis=1)).astype(np.int64)

    # Keep only points that fall inside the image. Without the >= 0 checks,
    # negative coordinates would wrap around and paint the opposite border.
    uvrgb = uvrgb[(uvrgb[:, 0] >= 0) & (uvrgb[:, 0] < img_width)]
    uvrgb = uvrgb[(uvrgb[:, 1] >= 0) & (uvrgb[:, 1] < img_height)]

    img = np.zeros((img_height, img_width, 3), dtype=np.uint8)
    for p in uvrgb:
        img[p[1], p[0]] = [p[4], p[3], p[2]]  # write as BGR

    img = cv2.flip(img, 1)
    img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)

    return img


def backproject_inplace(features, points, projection):
    """Splat 2-D image features onto a predefined 3-D point grid.

    input:
        features: [n_images, C, H, W] per-camera feature maps (e.g. [1, 3, 1080, 1920] raw images)
        points: [3, vx, vy, vz] grid of 3-D coordinates shared by all cameras
        projection: [n_images, 3, 4] lidar->image projection matrices
    output:
        volume: [C, vx*vy*vz] — feature of the pixel each grid point projects
        to; zeros where a point is outside every image. Valid features are
        written per camera in order, so later cameras overwrite earlier ones.
    """
    n_images, n_channels, height, width = features.shape

    # One homogeneous copy of the grid, broadcast across every camera:
    # [3, vx, vy, vz] -> [n_images, 4, n_points]
    flat = points.view(1, 3, -1).expand(n_images, 3, -1)
    flat = torch.cat((flat, torch.ones_like(flat[:, :1])), dim=1)

    # Project each grid point into every image plane (lidar2img).
    cam = torch.bmm(projection, flat)                     # [n_images, 3, n_points]
    u = (cam[:, 0] / cam[:, 2]).round().long()            # pixel column
    v = (cam[:, 1] / cam[:, 2]).round().long()            # pixel row
    depth = cam[:, 2]
    in_view = (u >= 0) & (v >= 0) & (u < width) & (v < height) & (depth > 0)

    # Fill only in-view cells; repeated hits are simply overwritten.
    volume = torch.zeros(
        (n_channels, flat.shape[-1]), device=features.device
    ).type_as(features)
    for idx in range(n_images):
        volume[:, in_view[idx]] = features[idx, :, v[idx, in_view[idx]], u[idx, in_view[idx]]]

    return volume


def mkdir_or_exist(dir_name, mode=0o777):
    """Create *dir_name* (with parents) if it does not already exist.

    ``~`` is expanded first. Uses ``exist_ok=True`` so there is no race
    between an existence check and the creation (the original's
    ``osp.exists`` + ``os.makedirs`` pair could raise FileExistsError if
    another process created the directory in between).
    """
    dir_name = osp.expanduser(dir_name)
    os.makedirs(dir_name, mode, exist_ok=True)

class CameraSync:
    """Time-synchronize several camera topics via ROS message_filters.

    Depending on the flags, each synchronized frame bundle is dumped to disk
    (record mode) and/or back-projected onto a ground-plane BEV image to
    visually evaluate the camera extrinsics (eval mode).
    """
    def __init__(self,camera_type=None,camera_calib_E_dir="",save_dir=None,record=False,eval=False,debug=True):
        """Set up projections, subscribe to one topic per camera, and run.

        camera_type: iterable of camera names, e.g. ['front_left', ...].
        camera_calib_E_dir: directory holding K/ and E/ calibration files;
            also used as the output directory for debug images.
        NOTE(review): the `eval` parameter shadows the builtin eval();
        kept as-is because callers pass it positionally.
        """
        rospy.init_node('camera_sync', anonymous=True)

        self.size = 1080  # only referenced by the commented-out K rescale in get_pro_m_from_KEs
        self.save_dir = save_dir
        self.camera_calib_E_dir = camera_calib_E_dir
        self.eval = eval
        self.debug = debug
        self.record = record
        self.count  = 0  # number of synchronized callback invocations
        
        # self.all_camera = ['front_left', 'front_right','front_mid','rear_right','rear_left', 'top_left','top_right']
        self.camera_type =camera_type
        self.projections = self.get_pro_m_from_KEs()
        self.sub_cams = []
        for i in self.camera_type:
            sub_cam = Subscriber("/camera/"+i, Image)
            # sub_cam = Subscriber("/senyun/"+i, Image)
            print(i,sub_cam)
            self.sub_cams.append(sub_cam)

        # Fire self.callback whenever one frame per camera arrives within 0.1 s.
        ts = ApproximateTimeSynchronizer(self.sub_cams, queue_size=10, slop=0.1, allow_headerless=True)
        ts.registerCallback(self.callback)

        # self.pub = rospy.Publisher('/synced_image', Image, queue_size=10)

        # rospy.spin()
        # Process callbacks for a fixed 20 s instead of spinning forever.
        rospy.sleep(20)

    def get_pro_m_from_KEs(self):
        """Load intrinsics (K, D) and extrinsics E per camera and return the
        list of 3x4 projection matrices P = K @ E[:3].

        Side effects: fills self.Ks and self.Ds with the adjusted intrinsics
        and distortion coefficients.
        """
        # NOTE(review): cx is shifted by half of (2800 - 1920), which looks
        # like compensation for a horizontal center crop from a 2800-px-wide
        # sensor to 1920 px, and K is then halved as if images are 2x
        # downscaled — confirm against the actual image pipeline.
        PIXL_WIDTH_N = 2800
        PIXL_WIDTH = 1920
        
        projections=[]
        self.Ks=[]
        self.Ds=[]
        print(self.camera_type)
        for c_i in self.camera_type:
            K_p = self.camera_calib_E_dir+"/K/" + c_i +"/KD.yaml"
            fs  = cv2.FileStorage(K_p, cv2.FileStorage_READ)
            K = fs.getNode('K').mat()
            K[0][2] = K[0][2] + (PIXL_WIDTH_N-PIXL_WIDTH)/2
            K=K/2
            K[2][2] = 1.0  # restore homogeneous scale after halving K
            # if self.size == 1280:
            #     K = K / 1.5
            #     K[2][2]=1
            # print(K)
            D = fs.getNode('D').mat()
            self.Ks.append(K)
            self.Ds.append(D)
            
            E_p = self.camera_calib_E_dir+"/E/transformed_extrinsic_" + c_i +".txt"
            E = np.loadtxt(E_p)
            projection= np.dot(K , E[:3,])  # K (3x3) times the top 3x4 of E
            projections.append(projection)

        return projections

    def callback(self, *args):
        """Handle one synchronized bundle of Image messages (one per camera).

        Saves each frame next to the calib dir, optionally records every 10th
        bundle under save_dir, and in eval mode back-projects the images onto
        a ground-plane grid to render a BEV image, then exits.
        """
        imgs = []
        self.count+=1
        for i,data in enumerate(args):
            # img = bridge.imgmsg_to_cv2(arg, "bgr8")
            # Reinterpret the raw ROS message buffer as an HxWx3 uint8 image
            # (avoids cv_bridge; assumes 3-channel 8-bit data — TODO confirm
            # the encoding of the incoming topics).
            img = np.ndarray(shape=(data.height, data.width, 3), dtype=np.uint8, buffer=data.data)
            save_name = osp.join(self.camera_calib_E_dir,self.camera_type[i]+".jpg")
            cv2.imwrite(save_name,img)
            
            # img = cv2.undistort(img, self.Ks[i], self.Ds[i])   

            if self.save_dir != None:
                # Record every 10th synchronized bundle, one subdir per camera.
                if self.record and self.count%10 == 0:
                    print(self.count)
                    # NOTE(review): rospy.Time.now() is the usual spelling;
                    # Time().now() appears equivalent here — verify.
                    timestamp = str(rospy.Time().now())
                    dir_name = osp.join(self.save_dir,self.camera_type[i].split("/")[-1])
                    mkdir_or_exist(dir_name)
                    save_path = osp.join(dir_name,timestamp+".jpg")
                    print(save_path)
                    cv2.imwrite(save_path,img)
            imgs.append(img)
        
        # im_black = np.zeros_like(img)
        # for i in range(8-len(imgs)):
        #     imgs.append(im_black)
        # # print("imgs : ",len(imgs))
        # h, w, c = imgs[0].shape
        # bg = np.zeros((h*2, w*4, c), dtype=np.uint8)

        # for i in range(8):
        #     row = int(i / 4)
        #     col = i % 4
        #     bg[row*h:(row+1)*h, col*w:(col+1)*w] = imgs[i]
        
        # if self.debug:
        #     cv2.namedWindow("r",cv2.WINDOW_NORMAL)
        #     cv2.imshow("r",bg)
        #     cv2.waitKey(1)

        # img_msg = bridge.cv2_to_imgmsg(bg, "bgr8")
        # self.pub.publish(img_msg)
        
        if self.eval:
            
            # Ground-plane region to render: x in [-40, 100] m, y in [-20, 20] m,
            # a single 1 m z slab, at 0.1 m/cell resolution.
            point_cloud_range=[-40,-20,0,100,20,1]
            bev_size = [1400, 400,1] # 0.1,0.1,1

            projections = torch.tensor(np.array(self.projections),dtype=torch.float32) # up dim to [1,3,4]
            print(projections.shape)
            # imgs: [n, H, W, 3] -> [n, 3, H, W] via two transposes
            imgs = torch.tensor(np.array(imgs))
            # imgs = torch.permute(imgs,(0,3,1,2)) # 1,1080,1920,3 --> 1,3,1080,1920
            imgs = torch.transpose(imgs, 1,3)
            imgs = torch.transpose(imgs, 2,3)
            print(imgs.shape)
            
            points = generate_bev_ground_points(point_cloud_range,bev_size)

            # Sample each ground point's color from the camera images.
            voxel_bgr = backproject_inplace(imgs, points, projections)  # [c, vx, vy, vz]
            print("volume_rgb: ",voxel_bgr.shape)
            voxel_bgr = voxel_bgr.T.numpy()       # -> (N, 3), channels last
            voxel_rgb = np.flip(voxel_bgr,1)      # reverse channel axis: BGR -> RGB

            voxel_points = points.view(3,-1).T.numpy()
            one_p = np.array([np.ones_like(voxel_points[0])])

            voxel_points = np.vstack([voxel_points,one_p])
            voxel_rgb = np.vstack([voxel_rgb,one_p])
            # final_points feeds only the commented-out Open3D visualization below.
            final_points = np.concatenate((voxel_points,voxel_rgb),axis=1)

            save_name = osp.join(self.camera_calib_E_dir,"bev.jpg")
            img = pcl2bev(voxel_points,voxel_rgb, ratio=1, width=400, height=1400,save_name = save_name)
            print("resutl_save: ",save_name)
            cv2.imwrite(save_name, img.astype("uint8"))
            # Eval is a one-shot check: stop the process after the first render.
            sys.exit()

            # mask1 = (final_points[:,3]>10) | (final_points[:,4]>10) | (final_points[:,5]>10)
            # mask2 = (final_points[:,0]==30)
            # mask = np.logical_and(mask1,mask2)
            # final_points = final_points[mask1]
            # print(final_points.shape)

            # show 
            # vis = o3d.visualization.Visualizer()
            # vis.create_window()
            # points_voxel_o3d = o3d.geometry.PointCloud()
            # points_voxel_o3d.points = o3d.utility.Vector3dVector(final_points[:,:3])
            # points_voxel_o3d.colors = o3d.utility.Vector3dVector(final_points[:,3:]/255)
            # vis.add_geometry(points_voxel_o3d)

            # frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=10)
            # vis.add_geometry(frame)
            # render_option = vis.get_render_option()
            # render_option.point_size = 4
            # render_option.background_color = np.asarray([1,1,1])
            # vis.run()
            # vis.destroy_window()

if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='camera save or calib_eval')

    parser.add_argument('-d', '--debug', action='store_true', help='debug action')

    parser.add_argument('-e', '--eval', action='store_true', help='eval action') # yes or not , not for record
    parser.add_argument('-r', '--record', action='store_true', help='record action') # yes or not , not for record

    # NOTE(review): this default already contains "/camera/", yet CameraSync
    # prepends "/camera/" again when subscribing — confirm the intended
    # default is just "top_right".
    parser.add_argument('-t', '--camera_tops', default="/camera/top_right")
    parser.add_argument('-a', '--use_all_camera_tops', action='store_true')

    parser.add_argument('-c', '--camera_E_dir', default=None)
    parser.add_argument('-s', '--save_dir', default=None)

    args = parser.parse_args()

    debug_mode = args.debug
    eval_mode = args.eval  # renamed so the builtin eval() is not shadowed
    record = args.record
    camera_E_dir = args.camera_E_dir
    save_dir = args.save_dir
    camera_tops = args.camera_tops
    use_all_camera_tops = args.use_all_camera_tops

    print('debug:', debug_mode)
    print('eval:', eval_mode)
    print('record:', record)
    print('camera_E_dir:', camera_E_dir)
    print('save_dir:', save_dir)
    # print('camera_tops:', camera_tops)
    print('use_all_camera_tops:', use_all_camera_tops)

    print("************************************ ")

    # Single source of truth for the full camera set (was duplicated in the
    # eval and record branches below).
    all_camera_tops = ['front_left', 'front_right', 'front_mid',
                       'top_left', 'top_right', 'rear_left', 'rear_right']

    if eval_mode:  # calibration-evaluation mode requires all cameras + calib dir
        print("this is eval camera calib program >>> ")
        if camera_E_dir is None:  # `is None`, not `== None`
            print("please set camera  E dir !!!")
            sys.exit()
        print("make sure start all camera topics !!!")
        camera_tops = all_camera_tops
        print("camera_tops: ", camera_tops)

    if record:  # frame-recording mode
        print("this is record mono camera img program >> ")
        if save_dir is None:
            print("please set save dir")
            # sys.exit()

        if use_all_camera_tops:
            print("make sure start all camera topics !!!")
            camera_tops = all_camera_tops
            print("camera_tops: ", camera_tops)
        else:
            camera_tops = [camera_tops]
            print("there is only one camera_tops: ", camera_tops)
            print('save_dir:', save_dir)

    CameraSync(camera_tops, camera_E_dir, save_dir, record, eval_mode, debug_mode)
