import torch
from torch import Tensor
import numpy as np
import copy
import cv2
import json
import sys
import pdb
import os.path as osp
import glob
import os
import re

def readPcd_np(lidar_path, num_fields=4):
    """Read a binary PCD file into an (N, num_fields) float32 array.

    Parses the standard 11-line PCD header for the POINTS count, then
    reads the remaining bytes as float32 data.

    Args:
        lidar_path: path to a binary-format .pcd file.
        num_fields: number of float32 fields per point (default 4,
            e.g. x, y, z, intensity).

    Returns:
        np.ndarray of shape (num_points, num_fields), dtype float32.

    Raises:
        ValueError: if the header contains no parsable POINTS line
            (the original code raised an opaque NameError here).
    """
    num_points = None
    with open(lidar_path, 'rb') as f:
        # A standard PCD header is 11 lines; consume all of them so the
        # file position ends up at the start of the binary payload.
        for _ in range(11):
            line = f.readline()
            s = line.decode('utf-8')
            if "POINTS" in s:
                print(s)
                match = re.search(r'\d+', s)  # first integer on the line
                if match:
                    num_points = int(match.group())
                    print(num_points)
                else:
                    print("No match")
        data = np.fromfile(f, dtype=np.float32)

    if num_points is None:
        raise ValueError("No POINTS line found in PCD header: " + lidar_path)
    points = np.reshape(data, (num_points, num_fields))
    return points
def showImg(img, wati_time=0, win_name="r"):
    """Show an image in a resizable window; terminate the program on 'q'.

    Blocks for `wati_time` ms (0 = wait forever) and returns the pressed
    key code masked to 8 bits.
    """
    cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
    cv2.imshow(win_name, img)
    pressed = cv2.waitKey(wati_time) & 0xFF
    if pressed == ord('q'):
        sys.exit()
    return pressed

@torch.no_grad()
def get_points(n_voxels: Tensor, voxel_size: Tensor, origin: Tensor):
    """Build a dense grid of voxel-center world coordinates.

    Args:
        n_voxels: per-axis voxel counts, shape [3].
        voxel_size: per-axis voxel edge length, shape [3].
        origin: world-space center of the whole grid, shape [3].

    Returns:
        Tensor of shape [3, n_voxels[0], n_voxels[1], n_voxels[2]].
    """
    axes = [torch.arange(int(count)) for count in n_voxels]
    index_grid = torch.stack(torch.meshgrid(axes))
    # Shift the grid so `origin` sits at its center.
    corner = origin - n_voxels / 2.0 * voxel_size
    scale = voxel_size.view(-1, 1, 1, 1)
    offset = corner.view(-1, 1, 1, 1)
    return index_grid * scale + offset

def generate_bev_ground_points(point_cloud_range, bev_size):
    """Generate the BEV ground-grid point coordinates for a given range.

    Args:
        point_cloud_range: [x_min, y_min, z_min, x_max, y_max, z_max]
            in meters, e.g. [0, -30, 0, 60, 30, 1].
        bev_size: [n_x, n_y, n_z] voxel counts per axis, e.g. [600, 600, 1].

    Returns:
        float32 tensor of shape [3, n_x, n_y, n_z] holding the world
        coordinates of every voxel center (see get_points).
    """
    n_voxels = bev_size

    # Per-axis voxel edge length so the grid exactly spans the range.
    voxel_size = torch.tensor([
        (point_cloud_range[3] - point_cloud_range[0]) / n_voxels[0],
        (point_cloud_range[4] - point_cloud_range[1]) / n_voxels[1],
        (point_cloud_range[5] - point_cloud_range[2]) / n_voxels[2],
    ])
    print("voxel_size: ", voxel_size)

    # Grid origin is the center of the range. (Fixed the original's
    # redundant `origin = origin=torch.tensor(...)` double assignment.)
    origin = torch.tensor(
        (np.array(point_cloud_range[:3]) + np.array(point_cloud_range[3:])) / 2.)
    print("origin: ", origin)

    points = get_points(  # [3, vx, vy, vz]
        n_voxels=torch.tensor(n_voxels),
        voxel_size=voxel_size,
        origin=origin,
    ).to(torch.float32)

    return points

def get_projections_from_EK(Ks, Es):
    """Compose per-camera projection matrices from intrinsics/extrinsics.

    Each output is K @ E[:3, :] — a 3x4 float32 projection matrix — and
    the list preserves the input (image) order.
    """
    print(Ks)
    print(Es)
    result = []
    for intrinsic, extrinsic in zip(Ks, Es):
        proj = intrinsic @ extrinsic[:3, ]
        result.append(proj.astype(np.float32))
    return result

def backproject_inplace(features, points, projection):
    """Lift 2D image features onto a predefined 3D point grid.

    Every grid point is projected into every image; where the projection
    lands inside the image bounds with positive depth, that image's
    feature vector is written into the corresponding voxel column.
    Images are processed in order, so later cameras overwrite earlier
    ones where their footprints overlap.

    Args:
        features: [n_images, C, H, W] per-image feature maps
            (e.g. raw BGR images here).
        points: [3, vx, vy, vz] grid point coordinates.
        projection: [n_images, 3, 4] camera projection matrices.

    Returns:
        volume: [C, vx*vy*vz] flattened feature volume (zeros where no
        camera sees the point).
    """
    n_images, n_channels, height, width = features.shape
    # Share one homogeneous point set across all cameras: [n_images, 4, N].
    flat = points.view(1, 3, -1).expand(n_images, 3, -1)
    homo = torch.cat((flat, torch.ones_like(flat[:, :1])), dim=1)

    # Project into pixel space: [n_images, 3, N].
    cam = torch.bmm(projection, homo)
    u = (cam[:, 0] / cam[:, 2]).round().long()
    v = (cam[:, 1] / cam[:, 2]).round().long()
    depth = cam[:, 2]
    # In-bounds pixels in front of the camera.
    inside = (u >= 0) & (v >= 0) & (u < width) & (v < height) & (depth > 0)

    volume = torch.zeros(
        (n_channels, homo.shape[-1]), device=features.device
    ).type_as(features)
    for idx in range(n_images):
        mask = inside[idx]
        volume[:, mask] = features[idx, :, v[idx, mask], u[idx, mask]]

    return volume


def get_KE(K_p, E_p, use_1400=True):
    """Load camera intrinsics/distortion and extrinsics from calib files.

    Args:
        K_p: OpenCV FileStorage YAML containing nodes 'K' (3x3 intrinsics)
            and 'D' (distortion coefficients).
        E_p: whitespace-separated text file holding the extrinsic matrix.
        use_1400: when True, adjust K for the half-resolution image that
            is side-padded by the global `side_add_w` pixels per side
            (matches the copyMakeBorder + resize done in getBev). Was a
            hard-coded local in the original; default preserves behavior.

    Returns:
        (K, D, E) numpy arrays. Exits the process if E_p does not exist.
    """
    fs = cv2.FileStorage(K_p, cv2.FileStorage_READ)
    K = fs.getNode('K').mat()
    D = fs.getNode('D').mat()
    if use_1400:
        # Half resolution halves the whole matrix; shift the principal
        # point by half the side padding and restore the homogeneous 1
        # that the division destroyed.
        K = K / 2
        K[0][2] += side_add_w / 2
        K[2][2] = 1
    if not osp.exists(E_p):
        print("file not exist: \n", E_p)
        sys.exit()
    E = np.loadtxt(E_p)
    return K, D, E

def pcl2bev(xyz, rgb, ratio=1, width=400, height=1400):
    """Rasterize colored 3D points into a BEV image.

    Maps the metric BEV rectangle x in [-40, 100], y in [-20, 20] onto an
    (ratio*height, ratio*width) raster via a homography, paints each
    point's color at its pixel, then flips/rotates into the final
    orientation.

    Args:
        xyz: (N, 3) point coordinates; only x, y are used (the third
            column is treated as the homogeneous 1). Not modified — the
            original wrote into xyz[:, 2] in place, clobbering the
            caller's array (which was a view of the lidar points).
        rgb: (N, 3) per-point colors in RGB order.
        ratio, width, height: raster scale and base size.

    Returns:
        (ratio*width, ratio*height, 3) uint8 BGR image.
    """
    img_width = int(ratio * width)
    img_height = int(ratio * height)
    img = np.ones((img_height, img_width, 3)).astype(np.uint8)
    img[:, :, 0] = 100  # blue-ish background so empty cells are visible

    # Homography from raster corners to the metric BEV rectangle.
    img_corners = np.array([[0, 0], [img_width, 0],
                            [img_width, img_height], [0, img_height]])
    bev_corners = np.array([[100, -20], [100, 20], [-40, 20], [-40, -20]])
    H, _ = cv2.findHomography(img_corners, bev_corners)

    # Homogeneous (x, y, 1) points; copy so the caller's array is intact.
    pts_h = xyz.copy()
    pts_h[:, 2] = 1
    uv = (np.linalg.inv(H) @ pts_h.T)[:2, ].T

    uvrgb = np.round(np.concatenate((uv, rgb), axis=1)).astype(np.int64)
    # Keep only pixels inside the raster. The original filtered only the
    # upper bounds, so negative coordinates wrapped around to the far
    # edge via numpy's negative indexing.
    keep = ((uvrgb[:, 0] >= 0) & (uvrgb[:, 0] < img_width) &
            (uvrgb[:, 1] >= 0) & (uvrgb[:, 1] < img_height))
    uvrgb = uvrgb[keep]

    for p in uvrgb:
        img[p[1], p[0]] = [p[4], p[3], p[2]]  # RGB -> BGR storage

    img = cv2.flip(img, 1)
    img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
    return img

def getBev(source_im_path,calib_path,scenes_dir,cameras,bev_eval_img_save_path,use_1400=True):
    """Render a camera-based BEV image, overlay the lidar BEV on top,
    and save the blended result for calibration evaluation.

    Args:
        source_im_path: any image path whose basename supplies the frame
            timestamp used to locate the matching lidar/camera files.
        calib_path: directory of per-camera intrinsic YAML files; the
            extrinsic directory is derived by replacing
            "cameras_in_calib_file" with "cameras_ex_calib_file".
        scenes_dir: scene root containing "lidar" and "image" subfolders.
        cameras: list of camera short names (keys of
            cams_type_dict_yaml_name) to fuse into one BEV.
        bev_eval_img_save_path: output path for the blended eval image;
            a camera-only image is also written with "org.jpg" appended.
        use_1400: when True, pad/half-resize/undistort the raw images to
            match the adjusted K produced by get_KE.

    Side effects: opens blocking preview windows (showImg; 'q' aborts the
    process) and writes two jpg files.
    """
    timestamp = source_im_path.split("/")[-1].split(".")[0]
    # NOTE(review): the line below overrides the previous one; it keeps
    # everything but the last 4 chars (assumes a 3-char extension).
    timestamp = source_im_path.split("/")[-1][:-4]
    lidar_p = osp.join(scenes_dir,"lidar",timestamp+".pcd")
    if osp.exists(lidar_p)==False:
        print("file not exist : ", lidar_p)
        exit()
    lidar_pts = readPcd_np(lidar_p)

    # point_cloud_range=[0,-20,0,100,20,1]
    # bev_size = [1000, 400,2] # 0.1,0.1,1

    # point_cloud_range=[-40,-20,0,100,20,1]
    # bev_size = [1400, 400,1] # 0.1,0.1,1

    # BEV extent: x in [-40, 100], y in [-20, 20], z in [0, 1] (meters).
    point_cloud_range=[-40,-20,0,100,20,1]

    # Crop lidar points to the BEV extent above.
    vis1 = lidar_pts[:,0]>-40
    vis2 = lidar_pts[:,0]<100
    vis = np.logical_and(vis1,vis2)
    vis3 = lidar_pts[:,1]>-20
    vis = np.logical_and(vis,vis3)
    vis4 = lidar_pts[:,1]<20
    vis = np.logical_and(vis,vis4)

    # vis5 = lidar_pts[:,0]<0
    # vis6 = lidar_pts[:,1]<1
    # vis_and56 = np.logical_and(vis5,vis6)

    # vis8 = lidar_pts[:,1]>-1
    # vis_and58 = np.logical_and(vis5,vis8)

    # vis_and568 = np.logical_and(vis_and56,vis_and58)
    # vis = np.logical_and(vis,vis_and568)

    lidar_pts = lidar_pts[vis]

    # 1400 x 400 x 1 cells at 0.1 m x/y resolution, single z layer.
    bev_size = [1400, 400,1] # 0.1,0.1,1

    # Build one projection matrix and one preprocessed image per camera.
    projections=[]
    imgs = []

    # Camera short name -> calib-file base name.
    cams_type_dict_yaml_name ={'topleft':'top_left','topright':'top_right','frontleft':'front_left',
                               'frontmid':'front_mid','frontright':'front_right', 'rearleft':'rear_left',
                               'rearright':'rear_right'}
    
    # cameras = ['topleft','topright','frontleft', 'frontmid', 'frontright', 'rearleft', 'rearright']
    # cameras = ['frontleft', 'frontright','frontmid','topleft','topright','rearleft', 'rearright']
    # cameras = ['rearright', 'rearleft','frontleft', 'frontright']
    # cameras = ['topright','frontleft', 'frontright','frontmid', 'rearleft', 'rearright','topleft']
    # cameras = ['frontleft',"frontmid","frontright","topleft"]
    # cameras = ['front_mid']

    for cams_type in cameras:
        # Load calibration: intrinsics from YAML, extrinsics from txt.
        calib_p = osp.join(calib_path,cams_type_dict_yaml_name[cams_type]+".yaml")
        calib_E_p = osp.join(calib_path.replace("cameras_in_calib_file","cameras_ex_calib_file"),cams_type_dict_yaml_name[cams_type]+".txt")
        print(calib_p)
        print(calib_E_p)
        K,D,E = get_KE(calib_p,calib_E_p)
        # print("K \n",K)
        # print("E \n",E)
        # 3x4 projection = K @ E[:3, :].
        projection= np.dot(K , E[:3,])
        projections.append(projection)
        
        # Images may live under "camera/<cam>" or "image/<cam>"; try jpg
        # first, then png.
        camera_dir_exit = False
        if camera_dir_exit:
            img_path = osp.join(scenes_dir,"camera",cams_type,timestamp+".jpg")
            if osp.exists(img_path)==False:
                img_path = osp.join(scenes_dir,"camera",cams_type,timestamp+".png")
        else:
            img_path = osp.join(scenes_dir,"image",cams_type,timestamp+".jpg")
            if osp.exists(img_path)==False:
                img_path = osp.join(scenes_dir,"image",cams_type,timestamp+".png")

        img = cv2.imread(img_path)
        # cv2.imwrite("org.jpg",img)
        showImg(img)  # blocking preview; press 'q' to abort
        if use_1400:
            # Pad side_add_w pixels on each side, resize to half
            # resolution, then undistort — must match get_KE's adjusted K.
            img = cv2.copyMakeBorder(img,0,0,side_add_w,side_add_w,0)
            img = cv2.resize(img,(960+side_add_w,540))
            img = cv2.undistort(img,K,D)
        # else:
        #     img = cv2.undistort(img,K,D)
        showImg(img)
        # cv2.imwrite("processed.jpg",img)
        bright_more = False
        if bright_more:
            # Optional debug: brighten via grayscale contrast stretch.
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            img = cv2.convertScaleAbs(gray, alpha=1.5, beta=0)
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
            showImg(img)

        imgs.append(img)

    # Optional debug: tile all 7 camera views in a 3x3 mosaic.
    show_final = False
    if show_final:
        img_black = np.zeros_like(img)
        if len(imgs) != 7:
            imgs.append(img_black)
        set1 = np.hstack([img_black,imgs[3],imgs[4]])
        set2 = np.hstack([imgs[0],imgs[2],imgs[1]])
        set3 = np.hstack([imgs[5],img_black,imgs[6]])
        final = np.vstack([set1,set2,set3])
        showImg(final,0,"final")
        cv2.imwrite("final.jpg", final)


    projections = torch.tensor(np.array(projections),dtype=torch.float32) # up dim to [1,3,4]
    print(projections.shape)

    # imgs
    imgs = torch.tensor(np.array(imgs))
    # imgs = torch.permute(imgs,(0,3,1,2)) # 1,1080,1920,3 --> 1,3,1080,1920
    print(imgs.shape)  # NHWC at this point, e.g. [n, 540, 1400, 3]

    # NHWC -> NCHW via two transposes (NHWC -> NCWH -> NCHW).
    imgs = torch.transpose(imgs, 1,3)
    imgs = torch.transpose(imgs, 2,3)
    print(imgs.shape)  # NCHW, e.g. [n, 3, 540, 1400]
    
    points = generate_bev_ground_points(point_cloud_range,bev_size)

    # Sample each ground-grid point's color from the camera images.
    voxel_bgr = backproject_inplace(imgs, points, projections)  # [c, vx, vy, vz]
    print("left_: ",voxel_bgr.shape)
    voxel_bgr = voxel_bgr.T.numpy()
    # BGR -> RGB by reversing the channel axis.
    voxel_rgb = np.flip(voxel_bgr,1)

    # Append a row of ones (homogeneous-style padding consumed by
    # pcl2bev, which overwrites the third coordinate anyway).
    voxel_points = points.view(3,-1).T.numpy()
    one_p = np.array([np.ones_like(voxel_points[0])])

    voxel_points = np.vstack([voxel_points,one_p])
    voxel_rgb = np.vstack([voxel_rgb,one_p])
    final_points = np.concatenate((voxel_points,voxel_rgb),axis=1)

    # mask1 = (final_points[:,3]>10) | (final_points[:,4]>10) | (final_points[:,5]>10)
    # mask2 = (final_points[:,0]==30)
    # mask = np.logical_and(mask1,mask2)
    # final_points = final_points[mask1]
    # print(final_points.shape)


    # Camera-colored BEV raster.
    img_rgb = pcl2bev(voxel_points,voxel_rgb, ratio=1, width=400, height=1400)
    showImg(img_rgb)

    # Lidar points rendered in a single color (blue channel = 255 after
    # pcl2bev's RGB->BGR swap), then alpha-blended over the camera BEV.
    rgb_lidar  = np.ones_like(lidar_pts)
    rgb_lidar[:,0]=255
    img_lidar = pcl2bev(lidar_pts[:,:3],rgb_lidar[:,:3], ratio=1, width=400, height=1400)

    eval_img = cv2.addWeighted(img_rgb,0.7,img_lidar,0.3,0.7)
    showImg(eval_img)
    cv2.imwrite(bev_eval_img_save_path+"org.jpg", img_rgb.astype("uint8"))
    cv2.imwrite(bev_eval_img_save_path, eval_img.astype("uint8"))
    

def mkdir_or_exist(dir_name, mode=0o777):
    """Create dir_name (and parents) unless it already exists.

    An empty string is a no-op; '~' is expanded before creation.
    """
    if osp.exists(dir_name):
        return
    if dir_name == '':
        return
    os.makedirs(osp.expanduser(dir_name), mode=mode)

# Source images are raw: they need undistortion plus the side-pad and
# half-resolution resize pipeline (see get_KE and getBev).
use_1400 = True # source img is raw, need to do undistortion
# Horizontal border in pixels added to EACH side at full resolution
# before the half-size resize; 440 is the deployed value.
side_add_w = 440
# Maps CLI camera names (snake_case) to the short names used for calib
# files and image folders.
camera_type_w_name_dict_ = {"front_left":'frontleft',
                            "front_mid":'frontmid',
                            "front_right":'frontright',
                            "rear_left":'rearleft',
                            "rear_right":'rearright',
                            "top_left":'topleft',
                            "top_right":'topright'}
if __name__ == "__main__":

    # CLI: python <script> <truck_number> <scene_dir> <camera_type|all>
    qturck_num=sys.argv[1]
    scenes_dir=sys.argv[2]
    cam_type=sys.argv[3]

    save_dir  = osp.join("./log",qturck_num)
    


    calib_path = "/home/westwell/welldriver/wellpilot_config/calib/cameras_in_calib_file/"+qturck_num
    
    if cam_type=="all":
        cameras = ["frontmid","frontleft","rearleft","frontright","rearright","topleft","topright"]
        # NOTE(review): the line below overwrites the full 7-camera list,
        # so "all" currently means only the two top cameras — looks like
        # leftover debugging; confirm the intended behavior.
        cameras = ["topleft","topright"]
        source_im_path=glob.glob(osp.join(scenes_dir,"image","topright","*"))[0]
    else:
        cameras = [camera_type_w_name_dict_[cam_type]]
        source_im_path=glob.glob(osp.join(scenes_dir,"image",camera_type_w_name_dict_[cam_type],"*"))[0]
    # cameras = ["frontmid","frontleft","rearleft","frontright","rearright","topleft","topright"]
    # cameras = ["frontmid","frontleft","frontright","topleft"]
    # cameras = ["topleft","topright"]
    # cameras = ["topright"]

    #### code 
    mkdir_or_exist(save_dir)
    mkdir_or_exist(save_dir+"/eval_res")
    
    # Render each camera's BEV individually, then one fused BEV over all.
    for cam in cameras:
        getBev(source_im_path,calib_path,scenes_dir,[cam],save_dir+"/eval_res/eval_"+cam+".jpg",use_1400)
    getBev(source_im_path,calib_path,scenes_dir,cameras,save_dir+"/eval_res/eval_fusion.jpg",use_1400)
    print(save_dir+"/eval_res/eval_fusion.jpg")
    
