"""
用于从gazebo生成的仿真图像中获取GT信息，边缘点，无拟合，无内点
"""

import numpy as np
import cv2
import os
import tqdm
from math import cos, pi, radians, sin
import argparse
import pickle
import yaml
import yaml.parser

LABEL_BYTES = 984003
BYTES_PER_PIXEL = 2
LINES = 61440
LINE_SAMPLES = 184320
LATITUDE_RANGE = 120  # deg
LONGITUDE_RANGE = 360  # deg
X_PIXEL_RANGE = 61440
Y_PIXEL_RANGE = 184320
scale_factor = 0.1  # 比例系数
rate = 3
scale_factor = 59 / scale_factor
MOON_RADIUS = 1737.4  # km
radius = MOON_RADIUS * 1000 / scale_factor
BASE_BATCH = 960


def arg_parse():
    """Parse the command-line options of the GT-generation script."""
    parser = argparse.ArgumentParser()
    # Directory holding the raw Gazebo collection output (comma-separated list allowed).
    parser.add_argument("-c", "--collect_dir", type=str,
                        help="The directory of the collected data")
    # Directory holding the crater labels and DEM tiles.
    parser.add_argument("-l", "--label_dir", type=str,
                        default="/disk527/sdb1/a804_cbf/datasets/lunar_crater",
                        help="The directory of the label data")
    parser.add_argument("--cache", action="store_true",
                        help="Whether to use cache")
    parser.add_argument("--image", action="store_true",
                        help="Whether to use image as background")
    return parser.parse_args()


def sphere2xyz(lat, lon, dr, r=None):
    """
    Convert spherical coordinates on the (scaled) lunar sphere to Cartesian.

    Warning ! The unit of latitude and longitude is degree.
    Arguments:
        lat (float|np.ndarray) : latitude, from -90 to 90
        lon (float|np.ndarray) : longitude, from 0 to 360
        dr (float|np.ndarray) : radial offset added to the sphere radius
        r (float, optional) : base sphere radius; defaults to the module-level
            scaled lunar ``radius``. Exposed as a parameter so the conversion
            can be reused (and tested) with an arbitrary sphere.
    Returns:
        (x, y, z) : Cartesian coordinates, same unit as ``r`` and ``dr``.
    """
    if r is None:
        # Backward-compatible default: the module-level scaled Moon radius.
        r = radius
    lat = lat / 180 * pi
    lon = lon / 180 * pi
    x = (r + dr) * np.cos(lat) * np.cos(lon)
    y = (r + dr) * np.cos(lat) * np.sin(lon)
    z = (r + dr) * np.sin(lat)
    return x, y, z


def plane2sphere(x, y):
    """
    Map pixel coordinates on the equirectangular mosaic to spherical ones.

    Return:
        lat (float) : latitude in degree, from -60 to 60
        lon (float) : longitude in degree, from 0 to 360
    """
    # Degrees per pixel along each axis; both ratios are exactly 1/512
    # (a power of two), so this factoring is bit-identical to the original
    # left-to-right evaluation.
    deg_per_row = LATITUDE_RANGE / X_PIXEL_RANGE
    deg_per_col = LONGITUDE_RANGE / Y_PIXEL_RANGE
    return 60 - x * deg_per_row, y * deg_per_col


def distortCircle(lat, d):
    """
    Compute the pixel-space ellipse axes of a crater circle after the
    equirectangular projection (longitude axis stretched by 1/cos(lat)).

    Arguments:
        lat (float) : latitude in degree, from -60 to 60
        d (float) : diameter in meter
    Returns:
        (D_x, D_y) : ellipse diameters in pixels along the two map axes.
    """
    # Angular size of the crater on the sphere, shared by both axes.
    angular = d / MOON_RADIUS
    axis_x = angular * X_PIXEL_RANGE / radians(LATITUDE_RANGE)
    axis_y = angular * Y_PIXEL_RANGE / radians(LONGITUDE_RANGE) / cos(radians(lat))
    return axis_x, axis_y


class ChangE50m:
    """
    Ground-truth crater-edge generator based on Chang'E 50 m DEM/label tiles.

    On construction it loads the DEM and crater labels for a grid of
    (row, col) tiles, extracts the 3-D rim points of every confident crater
    and moves them into a local world frame anchored at ``origin_point``.
    Calling the instance with a camera pose projects those points into the
    image and writes the GT image / point-id files.
    """

    def __init__(
        self,
        label_dir,
        row: range,
        col: range,
        origin_point: tuple,
        K: tuple,
        size: tuple,
        *,
        cache_dir=None,
    ):
        # Arguments:
        #   label_dir: root directory of the label / DEM tiles.
        #   row, col: tile indices to load and stitch.
        #   origin_point: (lat, lon) in degrees anchoring the local frame.
        #   K: flattened 3x4 camera projection matrix (12 values).
        #   size: output image size as (height, width).
        #   cache_dir: when given, attributes are restored from / saved to
        #       gt_cache.pkl inside it.
        if cache_dir is not None:
            if not os.path.exists(cache_dir):
                os.makedirs(cache_dir)
            if os.path.exists(os.path.join(cache_dir, "gt_cache.pkl")):
                # Restore every attribute computed by a previous run.
                # NOTE(review): pickle.load is only safe because the cache is
                # produced by this very script; never point it at untrusted data.
                with open(os.path.join(cache_dir, "gt_cache.pkl"), "rb") as f:
                    args_dict = pickle.load(f)
                    for k, v in args_dict.items():
                        setattr(self, k, v)
            else:
                # Stitch the per-tile DEMs into one mosaic and collect labels.
                # NOTE(review): the slice below uses the absolute tile index
                # (row_ * BASE_BATCH), which assumes row/col start at 0;
                # otherwise the slices fall outside the mosaic — confirm.
                dem = np.zeros(
                    (BASE_BATCH * len(row), BASE_BATCH * len(col)), dtype=np.int16
                )
                data = np.zeros((0, 6), dtype=np.float32)
                idx = []
                for row_ in row:
                    for col_ in col:
                        dem_, data_, idx_ = self._load(label_dir, row_, col_)
                        dem[
                            row_ * BASE_BATCH : (row_ + 1) * BASE_BATCH,
                            col_ * BASE_BATCH : (col_ + 1) * BASE_BATCH,
                        ] = dem_
                        data = np.vstack((data, data_))
                        idx = idx + idx_
                self.origin_point = np.array(origin_point)
                self.K = np.array(K).reshape(3, 4)
                self.size = size
                self.points_, self.idx = self.findCraters(dem, data, idx)
                self.map_idx = idx
                # Persist everything so the next run can skip the heavy work.
                with open(os.path.join(cache_dir, "gt_cache.pkl"), "wb") as f:
                    pickle.dump(self.__dict__, f)
        else:
            # Same pipeline as above, without touching the cache.
            dem = np.zeros(
                (BASE_BATCH * len(row), BASE_BATCH * len(col)), dtype=np.int16
            )
            data = np.zeros((0, 6), dtype=np.float32)
            idx = []
            for row_ in row:
                for col_ in col:
                    dem_, data_, idx_ = self._load(label_dir, row_, col_)
                    # NOTE(review): this branch trims a 3-pixel border
                    # (dem_[:-3, :-3]) while the cache branch assigns dem_
                    # untrimmed; only one of the two can match the
                    # BASE_BATCH-sized slice, so the tile size and these two
                    # code paths should be checked and unified.
                    dem[
                        row_ * BASE_BATCH : (row_ + 1) * BASE_BATCH,
                        col_ * BASE_BATCH : (col_ + 1) * BASE_BATCH,
                    ] = dem_[:-3, :-3]
                    data = np.vstack((data, data_))
                    idx = idx + idx_
            self.origin_point = np.array(origin_point)
            self.K = np.array(K).reshape(3, 4)
            self.size = size
            self.points_, self.idx = self.findCraters(dem, data, idx)
            self.map_idx = idx

    @property
    def points(self):
        # 4xN homogeneous crater rim points in the local world frame.
        return self.points_

    def findCraters(self, dem, data, idx):
        """
        Find the 3-D edge points of every crater in the DEM and convert them
        to Cartesian coordinates in a frame anchored at ``self.origin_point``.
        (No fitting, no inlier selection — raw rasterized rim pixels.)

        Arguments:
            dem (np.ndarray) : stitched int16 DEM mosaic.
            data : iterable of 6-value rows (x, y, lat, lon, r, conf).
            idx (list) : crater identifier strings, parallel to ``data``.
        Returns:
            (cali_points, points_id) : 4xM homogeneous points and, per point,
            the row index into ``data``/``idx`` of its crater.
        """
        points = []
        points_id = []
        for id, (x, y, lat, lon, r, conf) in tqdm.tqdm(
            enumerate(data), desc="Preparing for crater points", total=len(idx)
        ):
            # Keep only craters with a complete rim arc (high confidence).
            if conf < 0.75:
                continue
            # Ellipse axes (pixels) of the crater on the equirectangular map.
            D_x, D_y = distortCircle(lat, r)
            mask = np.zeros_like(dem, dtype=np.int16)
            # Rasterize a 2-pixel-wide elliptical rim into the mask.
            cv2.ellipse(
                mask,
                (int(y), int(x)),
                (int(D_y / 2), int(D_x / 2)),
                0,
                0,
                360,
                1,
                2,
            )
            img = mask * dem
            # Elevation offsets along the rim, rescaled to world units.
            # NOTE(review): this relies on boolean indexing and findNonZero
            # enumerating pixels in the same (row-major) order — confirm.
            dr = img[mask != 0] / scale_factor
            coor = cv2.findNonZero(mask).squeeze()
            lat, lon = plane2sphere(coor[:, 1], coor[:, 0])
            ids = np.ones_like(lat, dtype=np.int32) * id
            points.append(sphere2xyz(lat, lon, dr))
            points_id.append(ids)
        # Translate the origin to the standard (anchor) position.
        points_ = np.concatenate(points, axis=1)
        points_id = np.concatenate(points_id)
        origin_point = self.origin_point * pi / 180
        R = self.Rotate([0, pi / 2 - origin_point[0], origin_point[1]])
        T = sphere2xyz(self.origin_point[0], self.origin_point[1], 0)
        T = -R @ np.array(T)
        # Homogenize the points, then apply the rigid transform H = [R|T; 0 1].
        points_ = np.pad(points_, ((0, 1), (0, 0)), "constant", constant_values=1)
        H = np.hstack((R, T[:, None]))
        H = np.vstack((H, np.array([0, 0, 0, 1])))
        cali_points = H @ points_
        return cali_points, points_id

    def __call__(self, t, pose, ori, output_dir, img=None) -> bool:
        """
        Given the camera's pose in the world frame, project the crater points
        into the image and write the GT image plus the per-point id file.

        Arguments:
            t (str|float) : timestamp, used as the output file name.
            pose (list) : position output by the Gazebo simulation.
            ori (list) : Euler angles output by Gazebo, in Z-Y-X order
                (pitch-yaw-roll ordering of the result).
            output_dir (str) : run directory; gt_images/ and points_id/ are
                created inside it.
            img (np.ndarray) : image used as background for display, optional.
        Returns:
            bool : True when enough points were drawn and the files written.
        """
        image_dir = os.path.join(output_dir, "gt_images")
        points_id_dir = os.path.join(output_dir, "points_id")
        if not os.path.exists(image_dir):
            os.makedirs(image_dir)
        if not os.path.exists(points_id_dir):
            os.makedirs(points_id_dir)
        if img is None:
            img = np.zeros(self.size, dtype=np.uint8)
        p = self.world2camera(pose, ori)
        # p, p_idx = self.clip(p)
        img, th, p, p_idx = self.fill_points(p.astype(np.int32), img)
        if p.size < 100 or not th:
            # Too few points: the annotation image is meaningless, discard it.
            return False
        cv2.imwrite(f"{image_dir}/{t}.png", img)
        with open(f"{points_id_dir}/{t}.txt", "w") as f:
            f.write(f"x/pix,y/pix,crater_id\n")
            for (y, x), id in zip(p.T, p_idx):
                f.write(f"{x},{y},{self.map_idx[id]}\n")
        return True

    @staticmethod
    def _load(label_dir, row_, col_):
        """
        Load one tile: its DEM array plus crater label rows and id strings.

        Returns:
            (dem, data, idx) : the tile's DEM image, a list of 6-value label
            rows (x, y, lat, lon, radius, confidence) with x/y shifted into
            mosaic pixel coordinates, and the crater id strings (column 4
            of the label file).
        """
        with open(
            os.path.join(
                label_dir, f"labels/circle/{row_}/lbl_{row_}_{col_}_{BASE_BATCH}.txt"
            ),
            "r",
        ) as reader:
            dem = np.load(
                os.path.join(
                    label_dir, f"dem_int16/{row_}/dem_{row_}_{col_}_{BASE_BATCH}.npz"
                )
            )["image"]
            data = []
            idx = []
            for x in map(lambda line: line.strip().split(","), reader):
                data.append(
                    [
                        float(x[0]) + row_ * BASE_BATCH,  # x
                        float(x[1]) + col_ * BASE_BATCH,  # y
                        float(x[5]),  # lat
                        float(x[6]),  # lon
                        float(x[7]),  # radius
                        float(x[9]),  # confidence
                    ]
                )
                idx.append(x[4])

        return dem, data, idx

    def Rotate(self, ori):
        """
        Build the combined rotation matrix R_x @ R_y @ R_z from the three
        per-axis angles (radians).

        Arguments:
            ori (sequence) : (x, y, z) rotation angles about the three axes.
        """
        x, y, z = ori
        R_x = np.array(
            [
                [1, 0, 0],
                [0, cos(x), sin(x)],
                [0, -sin(x), cos(x)],
            ]
        )
        R_y = np.array(
            [
                [cos(y), 0, -sin(y)],
                [0, 1, 0],
                [sin(y), 0, cos(y)],
            ]
        )
        R_z = np.array(
            [
                [cos(z), sin(z), 0],
                [-sin(z), cos(z), 0],
                [0, 0, 1],
            ]
        )

        return R_x @ R_y @ R_z

    def world2camera(self, pose, ori):
        """
        Project the stored world points into pixel coordinates for the given
        camera pose.

        Arguments:
            pose (list) : camera position in the world frame.
            ori (list) : camera Euler angles (radians), fed to ``Rotate``.
        Returns:
            np.ndarray : 2xN pixel coordinates of all crater points.
        """
        R = self.Rotate(ori)
        # R_ swaps the x and y axes to convert between the camera frame and
        # the image frame.
        R_ = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
        T = -R @ np.array(pose)
        H = np.hstack((R, T[:, None]))
        H = np.vstack((H, np.array([0, 0, 0, 1])))
        homo_points = self.K @ R_ @ H @ self.points
        # Homogeneous -> absolute (pixel) coordinates.
        p = homo_points[:2] / homo_points[-1]
        return p

    def fill_points(self, points, image):
        """
        Draw the in-frame points into ``image`` and return the kept subset.

        Arguments:
            points (np.ndarray) : 2xN integer pixel coordinates.
            image (np.ndarray) : canvas, written in place.
        Returns:
            (image, ok, points_kept, idx_kept); ok is False when no point is
            inside the frame or fewer than 6 craters were iterated.
        """
        # clip to the image bounds
        x = points[0]
        y = points[1]
        ind = (x < self.size[0]) & (x >= 0) & (y < self.size[1]) & (y >= 0)
        idx = self.idx[ind]
        points = points[:, ind]
        points_clip = []
        idx_clip = []
        if idx.size == 0:
            return image, False, np.array([]), np.array([])
        for it, i in enumerate(np.unique(idx)):
            points_i = points[:, idx == i]
            # Discard craters spanning more than half the image height.
            if self.diameter(points_i) > 0.5 * image.shape[0]:
                continue
            image[points_i[1], points_i[0], ...] = 255
            points_clip.append(points_i)
            idx_clip.append(idx[idx == i])
        # NOTE(review): ``it`` counts iterated craters including the skipped
        # ones; if every crater is skipped by the size filter, points_clip is
        # empty and np.concatenate below raises. Counting kept craters
        # (len(points_clip)) looks like the intent — confirm before changing.
        if it < 5:
            return image, False, np.array([]), np.array([])
        return (
            image,
            True,
            np.concatenate(points_clip, axis=1),
            np.concatenate(idx_clip),
        )

    def diameter(self, points):
        """Return the larger of the point set's pixel height and width."""
        height = points[0].max() - points[0].min()
        width = points[1].max() - points[1].min()
        return max(height, width)


def gt_contour(sub_dir, collected_dir, cache_dir, use_image=None):
    """
    Generate GT crater images and labels for one collected Gazebo sequence.

    Matches each captured image (named by its float timestamp) with the
    closest pose line from pose.csv, then asks the ChangE50m generator to
    render/write the GT for that frame. Timestamps written to gt_labels.txt
    are the frames for which a GT image was produced.

    Arguments:
        sub_dir (str) : directory holding the label / DEM tiles (label_dir).
        collected_dir (str) : directory of one collection run; must contain
            config.yaml, pose.csv and an images/ sub-directory.
        cache_dir (str|None) : directory for the GT cache, or None.
        use_image (bool|None) : draw the GT on top of the captured image when
            True. When None (default), falls back to the module-level CLI
            flag ``args.image`` for backward compatibility with script usage.
    """
    if use_image is None:
        # Legacy behavior: this function used to read the global ``args``
        # directly; keep that as the default so script usage is unchanged.
        use_image = args.image
    num_batches = LINES // BASE_BATCH
    num_samples = LINE_SAMPLES // BASE_BATCH

    with open(os.path.join(collected_dir, "config.yaml"), "r") as f:
        # FullLoader is acceptable for our own config files; do not feed it
        # untrusted YAML.
        config = yaml.load(f.read(), Loader=yaml.FullLoader)
    row = config["world"]["row"]
    col = config["world"]["col"]

    # Latitude/longitude of the central tile, used as the local frame origin.
    delta_theta = 120 / num_batches
    delta_phi = 360 / num_samples
    origin_point = (
        60 - delta_theta * row[len(row) // 2],
        delta_phi * col[len(col) // 2],
    )

    gt_gen = ChangE50m(
        sub_dir,
        row,
        col,
        origin_point,
        config["camera"]["P"],
        (config["camera"]["height"], config["camera"]["width"]),
        cache_dir=cache_dir,
    )
    # Image files are named "<timestamp>.png" with a 6-decimal timestamp;
    # parse them back to floats so they can be compared with pose times.
    image_names = list(
        map(
            lambda x: float(x.removesuffix(".png")),
            os.listdir(f"{collected_dir}/images"),
        )
    )
    image_names.sort()
    it = 0
    image_name = image_names.pop(0)
    with open(f"{collected_dir}/pose.csv", "r") as f:
        f.readline()  # skip the CSV header line
        with open(f"{collected_dir}/gt_labels.txt", "w") as writer:
            end_of_file = False
            for line in tqdm.tqdm(
                f, desc="Generating GT images", total=len(image_names)
            ):
                # One pose sample: time, position (1:4), orientation (4:7).
                data = line.split(",")
                time = float(data[0])
                if image_name - time > 2e-4:
                    # Pose precedes the next image; keep scanning forward.
                    continue
                else:
                    # Reconstruct the exact image filename from the stamp.
                    time = f"{image_name:.6f}"
                    if len(image_names) != 0:
                        image_name = image_names.pop(0)
                    else:
                        end_of_file = True

                pose = list(float(x) for x in data[1:4])
                # Reorder Gazebo's Z-Y-X Euler output for ChangE50m.Rotate.
                ori = [float(data[6]), float(data[4]), float(data[5])]
                img = cv2.imread(f"{collected_dir}/images/{time}.png")

                # Skip frames whose image is missing/unreadable (imread
                # returns None) or almost uniform (nothing visible).
                if img is None or np.std(img) < 2:
                    continue
                flag = gt_gen(
                    time, pose, ori, collected_dir, img if use_image else None
                )
                if flag:
                    print(f"{time}", file=writer, flush=True)
                    it += 1
                if end_of_file:
                    break
            print("=" * 20 + f"\ntotal labels: {it}\n" + "=" * 20)


if __name__ == "__main__":
    # ``args`` stays module-global: gt_contour falls back to it internally.
    args = arg_parse()
    cache_location = "." if args.cache else None
    # -c accepts a comma-separated list of collection directories.
    for directory in args.collect_dir.split(","):
        gt_contour(args.label_dir, directory, cache_location)
