#include "camera_adapter.h"
#include "muduo/base/CountDownLatch.h"
#include "muduo/base/Timestamp.h"

#include "bevdet_data.h"
#include "cam_info.h"

#include <cv_bridge/cv_bridge.h>
#include <utils/topic.h>

using namespace perception::camera;

namespace perception::camera {

// Declared here, defined in another translation unit (training-data tool).
// NOTE(review): purpose inferred from the name — presumably dumps image/track
// pairs to disk as training data; confirm against the defining TU.
extern void make_train_data(const std::unordered_map<std::string, cv::Mat>& images,
    std::unordered_map<std::string, std::vector<ObjectTrackInfoPtr>>& tracked);
}

// Sliding-window frame-rate estimator over the last 10 seconds of samples.
class FrameStatistics {
public:
    // Record the arrival time of one frame.
    void receivedOneFrame()
    {
        frames_time_record_.insert(muduo::Timestamp::now());
    }

    // Frames per second over the trailing 10-second window.
    // Returns NAN when fewer than two samples remain in the window.
    double fps()
    {
        // Purge every sample older than 10 seconds — including the case where
        // ALL samples are stale (the old code skipped the erase then and kept
        // stale entries around).
        const auto cutoff = muduo::addTime(muduo::Timestamp::now(), -10);
        frames_time_record_.erase(frames_time_record_.begin(),
            frames_time_record_.upper_bound(cutoff));
        if (frames_time_record_.size() < 2) {
            return NAN;
        }
        const auto tdiff = muduo::timeDifference((*frames_time_record_.rbegin()), (*frames_time_record_.begin()));
        if (tdiff <= 0.0) {
            return NAN; // identical timestamps; avoid division by zero
        }
        // N samples span N-1 inter-frame intervals; dividing N by the span
        // (as before) overestimates the rate.
        return (frames_time_record_.size() - 1) / tdiff;
    }

private:
    std::set<muduo::Timestamp> frames_time_record_; // ordered arrival times
};

// File-local frame-rate tracker fed by send_tracked_objects() for the fps log line.
static FrameStatistics gFrameStatistics;

// Whitelist of cameras this node processes, mapped to an enabled flag.
// Returned by const reference: the previous by-value signature copied the
// whole map on every call (and onImages() called it three times per image).
static const std::map<std::string, bool>& getCameras(void)
{
    static const std::map<std::string, bool> cameras {
        { "camera0", true },
        { "camera1", true },
        { "camera2", true },
        { "camera3", true },
        { "camera4", true },
        { "camera5", true },
        { "camera6", true },
    };
    return cameras;
}

// Human-readable labels for detector type ids, used in the painted overlays.
// NOTE: call sites index this with operator[], so it must stay non-const;
// an unmapped ObjectType would silently insert an empty label.
static std::map<ObjectType, std::string> typid2str_tbl = {
    { ObjectType::UNKNOWN, "unknown" },
    { ObjectType::UNKNOWN_MOVABLE, "movable" },
    { ObjectType::UNKNOWN_UNMOVABLE, "unmovable" },
    { ObjectType::CAR, "car" },
    { ObjectType::VAN, "van" },
    { ObjectType::TRUCK, "truck" },
    { ObjectType::BUS, "bus" },
    { ObjectType::CYCLIST, "cyclist" },
    { ObjectType::MOTORCYCLIST, "motorcyclist" },
    { ObjectType::TRICYCLIST, "tricyclist" },
    { ObjectType::PEDESTRIAN, "pedestrian" },
    { ObjectType::TRAFFICCONE, "trafficcone" },
    { ObjectType::SAFETY_TRIANGLE, "triangle" },
    { ObjectType::BARRIER_DELINEATOR, "delineator" },
    { ObjectType::BARRIER_WATER, "water" },
    { ObjectType::ANIMAL, "animal" }
};

// Builds the camera perception node: loads the detectors, creates one tracker
// per camera, and wires up the ROS publishers / image subscriber.
CameraAdapter::CameraAdapter(std::string name)
    : Node(std::move(name))
    , logger_(rclcpp::get_logger("CameraAdapter"))
{
    using namespace std::placeholders;

    // Root directory of all model/config assets.
    // NOTE(review): hard-coded deployment path — consider a ROS parameter.
    const std::string etc = "/home/ubuntu/workroot/etc/";

    // init detectors
    detectors_ = std::make_shared<CameraDetectors>(1, etc.c_str());
    reid_ = ReID::get_detector(etc.c_str());
    lane_detector_ = LaneDetector::get_detector(etc.c_str());
    lane_seg_ = LaneSeg::get_detector(etc.c_str());

    // init trackers — one per camera. camera0/1 are 4K sources, the rest
    // 1080p; camera2's ROI mask was saved as .jpg, all others as .png.
    // 640x360 is the inference resolution in every case.
    struct TrackerCfg {
        const char* cam; // camera name, also used as map key and tracker tag
        int src_w;       // source image width
        int src_h;       // source image height
        const char* roi_ext; // ROI mask file extension
    };
    static const TrackerCfg kTrackerCfgs[] = {
        { "camera0", 3840, 2160, ".png" },
        { "camera1", 3840, 2160, ".png" },
        { "camera2", 1920, 1080, ".jpg" },
        { "camera3", 1920, 1080, ".png" },
        { "camera4", 1920, 1080, ".png" },
        { "camera5", 1920, 1080, ".png" },
        { "camera6", 1920, 1080, ".png" },
    };
    for (const auto& t : kTrackerCfgs) {
        const std::string inex = etc + "inex/" + t.cam + ".json";
        const std::string roi = etc + "roi/" + t.cam + t.roi_ext;
        trackers_[t.cam] = std::make_shared<OneCameraTracker>(inex.c_str(),
            t.src_w, t.src_h, 640, 360, roi.c_str(), t.cam);
    }

    // objects tx puber
    objects_pub_ = this->create_publisher<interface::msg::Objects>("/perception/cameraFused/objects", 30);
    lane_pub_ = this->create_publisher<interface::msg::MultiLanePoints>(lanePointsTopic, 30);

    // image tx puber
    painted_images_pub_ = this->create_publisher<interface::msg::Images>("/perception/cameraFused/images", 30);

    // recv image
    sub_ = this->create_subscription<interface::msg::Images>("/sensor/camera/images", 30, std::bind(&CameraAdapter::onImages, this, _1));
}

// Pipeline entry for one synchronized image batch: decode, detect, track,
// lane-detect (camera1), publish fused objects / lane points, then paint
// debug overlays and republish the annotated images.
void CameraAdapter::onImages(std::shared_ptr<interface::msg::Images> msg)
{
    std::unordered_map<std::string, cv::Mat> images;
    std::unordered_map<std::string, builtin_interfaces::msg::Time> tsms;
    std::unordered_map<std::string, std::vector<ObjectTrackInfoPtr>> tracked;

    /*DEBUG*/ // inter-arrival time of image batches, ms
    int64_t rx_now = muduo::Timestamp::now().microSecondsSinceEpoch() / 1000;
    static int64_t recv_time = rx_now;
    RCLCPP_INFO(logger_, "PECPDEBUG RX diff: %ld", rx_now - recv_time);
    recv_time = rx_now;

    // Decode images from known, enabled cameras only. Look each camera up
    // once — the previous code called getCameras() three times per image.
    const auto& cameras = getCameras();
    for (auto& i : msg->images) {
        const auto camera = i.header.frame_id;
        const auto cam_it = cameras.find(camera);
        if (cam_it == cameras.end() || !cam_it->second) {
            RCLCPP_ERROR(logger_, "Unsupported camera:%s", camera.c_str());
            continue;
        }
        images[camera] = cv_bridge::toCvCopy(i, "bgr8")->image;
        tsms[camera] = i.header.stamp;
        int64_t this_tsms = int64_t(i.header.stamp.sec) * 1000 + i.header.stamp.nanosec / 1000 / 1000;
        RCLCPP_INFO(logger_, "recv camera:%s, tsms: %ld (%d %d)", camera.c_str(), this_tsms,
            images[camera].rows, images[camera].cols);
    }

    // detect
    RCLCPP_INFO(logger_, "detectors_->process begin");
    auto result = detectors_->process(images);

    RCLCPP_INFO(logger_, "detectors_->process end");

    // track, per camera, stamped with that camera's capture time in ms
    RCLCPP_INFO(logger_, "track begin");
    for (auto& detected : result) {
        const auto camera = detected.first;
        const auto this_tsms = int64_t(tsms[camera].sec) * 1000 + tsms[camera].nanosec / 1000 / 1000;
        trackers_[camera]->process(detected.second, tracked[camera], images[camera], this_tsms);
    }
    RCLCPP_INFO(logger_, "track end");

    // lane detect (camera1 only). Guarded: the previous images[lane_camname]
    // default-inserted an EMPTY cv::Mat and ran both detectors on it whenever
    // camera1 was missing from the batch.
    RCLCPP_INFO(logger_, "lane detect begin");
    std::string lane_camname = "camera1";
    lanes_impts_.clear();
    lanes_gpts_.clear();
    lane_cnts_.clear();
    const auto lane_it = images.find(lane_camname);
    if (lane_it != images.end()) {
        lane_detector_->process(lane_it->second, lanes_impts_);
        // NOTE(review): debug dump written every frame — consider removing.
        cv::imwrite("output_image.jpg", lane_it->second);
        lane_seg_->process(lane_it->second, lane_cnts_);
        CameraInfo* cam_info = CameraInfo::get_inst();
        for (auto& lane : lanes_impts_) {
            std::vector<cv::Point2f> gpts;
            // Input must be normalized image coordinates. Output frame: origin
            // at the rear-wheel center, +X forward, +Y left, +Z up.
            cam_info->convert_uv2xy(lane_camname, lane, gpts);
            lanes_gpts_.push_back(std::move(gpts));
        }
    } else {
        RCLCPP_WARN(logger_, "lane camera %s missing, lane detect skipped", lane_camname.c_str());
    }
    RCLCPP_INFO(logger_, "lane detect end");

    // Publish lane points, swapping axes for the message frame: x = -Y, y = X.
    interface::msg::MultiLanePoints laneData;
    for (auto& i : lanes_gpts_) {
        interface::msg::LanePoints lanePoints;
        for (auto& j : i) {
            interface::msg::LanePoint lanePoint;

            lanePoint.x = j.y * -1;
            lanePoint.y = j.x;
            lanePoints.lane_points.emplace_back(std::move(lanePoint));
        }
        laneData.multi_lane_points.emplace_back(std::move(lanePoints));
    }
    lane_pub_->publish(laneData);
    send_tracked_objects(msg->header.stamp, tsms, tracked);

#if 0 // make training data
    make_train_data(images, tracked);
#endif
    // paint_on_images(images, tracked);
    // draw 3D boxes
    // paint_on_images(images, result);

    // paint_on_images(images, tracked);
    paint_on_images(images, result);
    paint_on_bev(images, result, tracked);

#if 0
    // send images
    cv::Mat tobeshow = images["camera0"];
    // cv::resize(images["camera0"], tobeshow, cv::Size(1280, 720));
    cv::imshow("camera1", tobeshow);
    cv::waitKey(10);
#endif
    static size_t counter = 0;
    counter++;
    // Re-encode the annotated images back into the incoming message in place.
    // NOTE(review): this iterates ALL incoming frames, so an unsupported
    // camera would fetch a default-inserted empty Mat here — confirm intent.
    for (auto& i : msg->images) {
        // cv::imwrite("./images/" + i.header.frame_id + "_" + std::to_string(counter) + ".jpg", images[i.header.frame_id]);
        cv_bridge::CvImage(i.header, "bgr8", images[i.header.frame_id]).toImageMsg(i);
    }

    painted_images_pub_->publish(*msg);

    /*DEBUG*/ // inter-departure time of annotated batches, ms
    int64_t tx_now = muduo::Timestamp::now().microSecondsSinceEpoch() / 1000;
    static int64_t send_time = tx_now;
    RCLCPP_INFO(logger_, "PECPDEBUG TX diff: %ld", tx_now - send_time);
    send_time = tx_now;
}

// Paints tracked objects on each camera image: the projected 3D box as a
// yellow wire-frame, the 2D box, and a "trackid:type:(x,y)" label on a
// black background.
void CameraAdapter::paint_on_images(std::unordered_map<std::string, cv::Mat>& images,
    std::unordered_map<std::string, std::vector<ObjectTrackInfoPtr>>& tracked)
{
    // Corner-index pairs forming the 12 edges of the projected 3D box.
    static const int kBoxEdges[12][2] = {
        { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 0 }, // bottom quad (top view)
        { 4, 5 }, { 5, 6 }, { 6, 7 }, { 7, 4 }, // top quad
        { 0, 4 }, { 1, 5 }, { 2, 6 }, { 3, 7 }  // vertical edges
    };
    const cv::Scalar yellow(0, 0xFF, 0xFF);
    auto to_cv = [](const Point2f& pt) -> cv::Point { return cv::Point(pt.x, pt.y); };

    for (auto& entry : images) {
        cv::Mat& img = entry.second;

        for (auto obj : tracked[entry.first]) {
            const auto& corners = obj->detect.pts8;
            // 3D wire-frame
            for (const auto& e : kBoxEdges) {
                cv::line(img, to_cv(corners.pts8[e[0]]), to_cv(corners.pts8[e[1]]), yellow, 1);
            }

            // 2D bounding box
            cv::rectangle(img,
                to_cv(obj->detect.box.left_top), to_cv(obj->detect.box.right_bottom), yellow);

            // trackid + type label, drawn over a filled black strip just
            // above the box's top-left corner
            std::stringstream label;
            label << obj->tracker.track_id << ":" << typid2str_tbl[ObjectType(obj->detect.type_id)]
                  << ":(" << obj->detect.box.left_top.x << "," << obj->detect.box.left_top.y << ")";
            const auto text = label.str();
            const cv::Size text_size = cv::getTextSize(text, cv::FONT_HERSHEY_COMPLEX, 1, 1, nullptr);
            const cv::Rect text_bg(obj->detect.box.left_top.x, obj->detect.box.left_top.y - text_size.height,
                text_size.width, text_size.height);
            cv::rectangle(img, text_bg, cv::Scalar(0x00, 0x00, 0x00), cv::FILLED);
            cv::putText(img, text, to_cv(obj->detect.box.left_top), cv::FONT_HERSHEY_COMPLEX, 0.75, yellow, 1);
        }
    }
}

// Paints raw detections on each camera image (3D wire-frame + 2D box) and
// crops the per-object ROI for the (currently disabled) ReID step.
void CameraAdapter::paint_on_images(std::unordered_map<std::string, cv::Mat>& images,
    std::unordered_map<std::string, std::vector<ObjectDetectInfoPtr>>& detected)
{
    // Corner-index pairs forming the 12 edges of the projected 3D box.
    static const int kBoxEdges[12][2] = {
        { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 0 }, // bottom quad (top view)
        { 4, 5 }, { 5, 6 }, { 6, 7 }, { 7, 4 }, // top quad
        { 0, 4 }, { 1, 5 }, { 2, 6 }, { 3, 7 }  // vertical edges
    };

    for (auto& image_pair : images) {
        auto& key = image_pair.first;
        auto& image = image_pair.second;
        auto& result = detected[key];

        auto f2d = [](const Point2f& in) -> cv::Point { return cv::Point(in.x, in.y); };
        for (auto obj : result) {
            auto& pts8 = obj->pts8;
            // 3D wire-frame
            for (const auto& e : kBoxEdges) {
                cv::line(image, f2d(pts8.pts8[e[0]]), f2d(pts8.pts8[e[1]]), cv::Scalar(0, 0xFF, 0xFF), 1);
            }

            // 2D bounding box
            cv::rectangle(image,
                f2d(obj->box.left_top), f2d(obj->box.right_bottom), cv::Scalar(0, 0xFF, 0xFF));

            // ROI crop for ReID. Clamp the rect to the image first: the
            // unclamped image(rct) throws cv::Exception for any detection box
            // that touches or crosses the frame border.
            cv::Rect rct(obj->box.left_top.x, obj->box.left_top.y, obj->box.Width(), obj->box.Height());
            rct &= cv::Rect(0, 0, image.cols, image.rows);
            if (rct.area() > 0) {
                cv::Mat objim = image(rct);
                (void)objim; // consumed once the ReID call below is re-enabled
                // reid_->process(objim, obj->object_feature);
            }
            //  std::cout << "feat:" << obj->object_feature.size() << std::endl;
            //  std::stringstream ss;
            //  ss << obj->tracker.track_id << ":" << typid2str_tbl[ObjectType(obj->detect.type_id)]
            //     << ":(" << obj->detect.box.left_top.x << "," << obj->detect.box.left_top.y << ")";
            //  const auto text = ss.str();
            //  const cv::Size text_size = cv::getTextSize(text, cv::FONT_HERSHEY_COMPLEX, 1, 1, nullptr);
            //  const auto rect = cv::Rect(obj->detect.box.left_top.x, obj->detect.box.left_top.y - text_size.height, text_size.width, text_size.height);
            //  cv::rectangle(image, rect, cv::Scalar(0x00, 0x00, 0x00), cv::FILLED);
            //  cv::putText(image, text, f2d(obj->detect.box.left_top), cv::FONT_HERSHEY_COMPLEX, 0.75, cv::Scalar(0, 0xFF, 0xFF), 1);
        }
    }
}

// Debug visualization: draws ReID match scores, tracked boxes, heading
// segments and lane points onto per-camera thumbnails arranged around a
// central bird's-eye-view (BEV) image, then writes the composite canvas to
// ./log/draw_canvas/<frame>.jpg.
void CameraAdapter::paint_on_bev(std::unordered_map<std::string, cv::Mat>& images,
    std::unordered_map<std::string, std::vector<ObjectDetectInfoPtr>>& detected,
    std::unordered_map<std::string, std::vector<ObjectTrackInfoPtr>>& tracked)
{
    // BEV extent: 60 m left/right, 100 m ahead, 70 m behind => 120 x 170 m;
    // at 1 px per decimeter that would be 1200 x 1700 px.
    // NOTE(review): the origin below is the image CENTER (i.e. +/-85 m
    // fore/aft, not 100/70) and `scale` is 5 rather than 10 px/m — confirm
    // the intended resolution/extents.
    float scale = 5;
    cv::Mat bev_img = cv::Mat::zeros(1700, 1200, CV_8UC3);
    int bev_imh = bev_img.rows;
    int bev_imw = bev_img.cols;
    int originx = bev_imw / 2;
    int originy = bev_imh / 2;
    // Red dot marking the ego/origin position on the BEV image.
    cv::circle(bev_img, cv::Point(originx, originy), 10, cv::Scalar { 0, 0, 255 }, -1);
    int view_w = 960, view_h = 540; // , infer_w = 640, infer_h = 360;
    // Composite canvas: camera thumbnails tiled around the BEV image.
    cv::Mat canvas = cv::Mat::zeros(bev_imh + 540 * 3, bev_imw + 960 * 2, CV_8UC3);
    CameraInfo* cam_info = CameraInfo::get_inst();
    // draw reid
    // Neighbour camera of each camera, used to visualize cross-view ReID
    // matching. NOTE(review): there is no entry for "camera0", so the
    // cam_nb[...] lookup below default-inserts "" for icam==0 (and then
    // detected[""] inserts an empty vector) — harmless but wasteful.
    std::map<std::string, std::string> cam_nb = {
        { "camera1", "camera0" },
        { "camera2", "camera1" },
        { "camera3", "camera1" },
        { "camera4", "camera2" },
        { "camera5", "camera3" },
        { "camera6", "camera4" }
    };
    int tid = 0; // running pair id shared by both annotated views
    for (size_t icam = 0; icam < 7; ++icam) {
        std::string camera_name = "camera" + std::to_string(icam);
        auto& cur_objs = detected[camera_name];
        std::string nb_cname = cam_nb[camera_name];
        auto& nb_objs = detected[nb_cname]; // neighbour
        for (auto& cobj : cur_objs) {
            // Find the neighbour-view object with minimal ReID cosine distance.
            float mindist = 999999999;
            int minn = -1;
            for (size_t in = 0; in < nb_objs.size(); ++in) {
                auto& nobj = nb_objs[in];
                float dist = reid_->cosine_dist(cobj->object_feature, nobj->object_feature);
                if (dist < mindist) {
                    mindist = dist;
                    minn = in;
                }
            }
            if (minn != -1) {
                // Annotate both images with the same "tid:distance" tag so the
                // match can be eyeballed across views.
                auto& cim = images[camera_name];
                auto& nim = images[nb_cname];
                std::stringstream cscore;
                std::stringstream nscore;
                cscore << tid << ":" << std::fixed << std::setprecision(2) << mindist;
                nscore << tid << ":" << std::fixed << std::setprecision(2) << mindist;
                cv::putText(cim, cscore.str(), cv::Point2d(cobj->bottom_uv.x, cobj->bottom_uv.y),
                    cv::FONT_HERSHEY_COMPLEX, 0.8,
                    cv::Scalar(0, 0, 0xFF), 1);
                cv::putText(nim, nscore.str(),
                    cv::Point2d(nb_objs[minn]->bottom_uv.x, nb_objs[minn]->bottom_uv.y),
                    cv::FONT_HERSHEY_COMPLEX, 0.8,
                    cv::Scalar(0, 0, 0xFF), 1);
                ++tid;
            }
        }
    }
    // Per-camera thumbnail: draw tracked boxes, heading segments and (for
    // camera1) lane points, then blit into the composite canvas.
    for (auto& image_pair : images) {
        auto& cam_name = image_pair.first;
        auto& image = image_pair.second;
        int orig_img_w = image.cols;
        int orig_img_h = image.rows;
        // auto &result = detected[cam_name];
        auto& result = tracked[cam_name];
        std::cout << "cam_name:" << cam_name << std::endl;
        std::cout << "rows:" << image.rows << std::endl;
        // auto f2d = [](const Point2f& in) -> cv::Point { return cv::Point(in.x, in.y); };
        cv::Mat resized;
        cv::resize(image, resized, cv::Size(view_w, view_h));
        for (auto obj : result) {
            auto& detobj = obj->detect;
            // auto &detobj = obj;
            auto& pts8 = detobj.pts8.pts8;
            // 2D box rescaled from source resolution to the thumbnail.
            int left = detobj.box.left_top.x / orig_img_w * view_w;
            int top = detobj.box.left_top.y / orig_img_h * view_h;
            int right = detobj.box.right_bottom.x / orig_img_w * view_w;
            int bot = detobj.box.right_bottom.y / orig_img_h * view_h;
            std::cout << "ltrb:" << left << "," << top << "," << right << "," << bot << std::endl;
            std::cout << "detect:" << detobj.box.left_top.x << "," << detobj.box.left_top.y
                      << "," << detobj.box.right_bottom.x << "," << detobj.box.right_bottom.y << std::endl;
            // for(int ikpt = 0; ikpt < 8; ++ikpt){
            //     auto cpt = cv::Point(pts8[ikpt].x / orig_img_w * view_w, pts8[ikpt].y / orig_img_h * view_h);
            //     cv::putText(resized, std::to_string(ikpt), cpt, cv::FONT_HERSHEY_COMPLEX, 0.75, cv::Scalar(0, 0xFF, 0xFF), 1);
            // }
            cv::rectangle(resized,
                cv::Point(left, top), cv::Point(right, bot), cam_color.at(cam_name));
            cv::Point2f botuv { detobj.bottom_uv.x, detobj.bottom_uv.y };
            // Midpoints of the box's front (pts 0-1) and back (pts 2-3) bottom
            // edges, in normalized image coordinates.
            cv::Point2f front_mpt { (pts8[0].x + pts8[1].x) / 2 / orig_img_w, (pts8[0].y + pts8[1].y) / 2 / orig_img_h }; // front middle point
            cv::Point2f back_mpt { (pts8[2].x + pts8[3].x) / 2 / orig_img_w, (pts8[2].y + pts8[3].y) / 2 / orig_img_h };
            // Project both midpoints to ground coordinates to get a heading
            // segment for this object.
            std::vector<cv::Point2f> bf_pts { back_mpt, front_mpt };
            std::vector<cv::Point2f> bf_gpts;
            cam_info->convert_uv2xy(cam_name, bf_pts, bf_gpts);
            float bX = bf_gpts[0].x, bY = bf_gpts[0].y; // back X, Y
            float fX = bf_gpts[1].x, fY = bf_gpts[1].y; // front X, Y
            // Axis swap (cX = -Y, cY = X) before computing the heading angle.
            float cbX = -bY, cbY = bX;
            float cfX = -fY, cfY = fX;

            decision::math::Segment2d segment(decision::math::Vec2d(cbX, cbY), decision::math::Vec2d(cfX, cfY));
            // Heading in degrees, measured from the swapped frame's axis.
            auto angle = legalize_angle((M_PI_2 - segment.heading()) / M_PI * 180);
            std::stringstream ss;
            ss << std::fixed << std::setprecision(2) << angle << "," << fX - bX << "," << fY - bY;

            std::string stext = typid2str_tbl[ObjectType(detobj.type_id)];
            // std::to_string((int)bX)
            // cv::putText(resized, stext, cv::Point(back_mpt.x * view_w, back_mpt.y * view_h),
            //     cv::FONT_HERSHEY_COMPLEX, 0.5, cam_color.at(cam_name), 1);
            // std::string stext2 = ss.str()
            // "angle,dX,dY" label above the back midpoint, plus markers for
            // the back/front midpoints and the first four box corners.
            cv::putText(resized, ss.str(), cv::Point(back_mpt.x * view_w, back_mpt.y * view_h - 50),
                cv::FONT_HERSHEY_COMPLEX, 0.5, cam_color.at(cam_name), 1);
            cv::circle(resized, cv::Point(back_mpt.x * view_w, back_mpt.y * view_h), 4, cam_color.at(cam_name), -1);
            cv::circle(resized, cv::Point(front_mpt.x * view_w, front_mpt.y * view_h), 2, cam_color.at(cam_name), -1);
            cv::line(resized, cv::Point(back_mpt.x * view_w, back_mpt.y * view_h),
                cv::Point(front_mpt.x * view_w, front_mpt.y * view_h),
                cam_color.at(cam_name));
            cv::circle(resized, cv::Point(pts8[0].x / orig_img_w * view_w, pts8[0].y / orig_img_h * view_h),
                2, cam_color.at(cam_name), -1);
            cv::putText(resized, "0", cv::Point(pts8[0].x / orig_img_w * view_w, pts8[0].y / orig_img_h * view_h),
                cv::FONT_HERSHEY_COMPLEX, 0.5, cam_color.at(cam_name), 1);
            cv::circle(resized, cv::Point(pts8[1].x / orig_img_w * view_w, pts8[1].y / orig_img_h * view_h),
                2, cam_color.at(cam_name), -1);
            cv::putText(resized, "1", cv::Point(pts8[1].x / orig_img_w * view_w, pts8[1].y / orig_img_h * view_h),
                cv::FONT_HERSHEY_COMPLEX, 0.5, cam_color.at(cam_name), 1);
            cv::circle(resized, cv::Point(pts8[2].x / orig_img_w * view_w, pts8[2].y / orig_img_h * view_h),
                2, cam_color.at(cam_name), -1);
            cv::putText(resized, "2", cv::Point(pts8[2].x / orig_img_w * view_w, pts8[2].y / orig_img_h * view_h),
                cv::FONT_HERSHEY_COMPLEX, 0.5, cam_color.at(cam_name), 1);
            cv::circle(resized, cv::Point(pts8[3].x / orig_img_w * view_w, pts8[3].y / orig_img_h * view_h),
                2, cam_color.at(cam_name), -1);
            cv::putText(resized, "3", cv::Point(pts8[3].x / orig_img_w * view_w, pts8[3].y / orig_img_h * view_h),
                cv::FONT_HERSHEY_COMPLEX, 0.5, cam_color.at(cam_name), 1);
            // Ground coordinates mapped into BEV pixels (center origin).
            int bpx = (int)(-bY * scale + bev_imw / 2); // back bev pixel x
            int bpy = (int)(-bX * scale + bev_imh / 2);
            int fpx = (int)(-fY * scale + bev_imw / 2);
            int fpy = (int)(-fX * scale + bev_imh / 2);
            std::cout << "img backxy:" << back_mpt.x << "," << back_mpt.y
                      << ", back wxy:" << bX << "," << bY
                      << ", back proj img xy:" << bpx << "," << bpy
                      << ", img frontxy:" << front_mpt.x << "," << front_mpt.y
                      << ", front wxy:" << fX << "," << fY
                      << ", front proj img xy:" << fpx << "," << fpy
                      << std::endl;
            cv::line(bev_img, cv::Point(bpx, bpy), cv::Point(fpx, fpy), cam_color.at(cam_name));
            cv::circle(bev_img, cv::Point(bpx, bpy), 4, cam_color.at(cam_name), -1);
        }
        // draw lane
        if (cam_name == "camera1") {
            // Normalized image-space lane points on the thumbnail ...
            for (const auto& lane_points : lanes_impts_) {
                for (const auto& point : lane_points) {
                    cv::Point2d pt { point.x * view_w, point.y * view_h };
                    cv::circle(resized, pt, 1, cv::Scalar(0, 255, 0), -1);
                }
            }
            // ... and ground-space lane points on the BEV image.
            for (const auto& lane_points : lanes_gpts_) {
                for (const auto& point : lane_points) {
                    cv::Point2d pt { point.x, point.y };
                    int px = (int)(-point.y * scale + bev_imw / 2);
                    int py = (int)(-point.x * scale + bev_imh / 2);
                    cv::circle(bev_img, cv::Point2d(px, py), 4, cv::Scalar(0, 255, 0), -1);
                    std::string stext = std::to_string(point.y);
                    // NOTE(review): labels use BEV pixel coords (px,py) but are
                    // drawn on `resized` (the thumbnail) — confirm intended.
                    cv::putText(resized, stext, cv::Point2d(px, py),
                        cv::FONT_HERSHEY_COMPLEX, 0.5, cam_color.at(cam_name), 1);
                }
            }
        }
        // Fixed tile position of each camera on the composite canvas.
        cv::Rect rect;
        if (cam_name == "camera0")
            rect = cv::Rect(view_w, 0, view_w, view_h);
        else if (cam_name == "camera1")
            rect = cv::Rect(view_w, view_h, view_w, view_h);
        else if (cam_name == "camera2")
            rect = cv::Rect(0, view_h * 2, view_w, view_h);
        else if (cam_name == "camera3")
            rect = cv::Rect(view_w + bev_imw, view_h * 2, view_w, view_h);
        else if (cam_name == "camera4")
            rect = cv::Rect(0, view_h * 3, view_w, view_h);
        else if (cam_name == "camera5")
            rect = cv::Rect(view_w + bev_imw, view_h * 3, view_w, view_h);
        else if (cam_name == "camera6")
            rect = cv::Rect(view_w, view_h * 2 + bev_imh, view_w, view_h);
        else {
            assert(false);
        }
        cv::Mat sub_reg = canvas(rect);
        resized.copyTo(sub_reg);
    }

    // Frame counter for the output filename.
    static int iframe = -1;
    ++iframe;
    bev_img.copyTo(canvas(cv::Rect(view_w, view_h * 2, bev_imw, bev_imh)));
    // NOTE(review): imwrite fails silently (returns false) if
    // ./log/draw_canvas/ does not exist.
    cv::imwrite("./log/draw_canvas/" + std::to_string(iframe) + ".jpg", canvas);
}

// Flattens the per-camera tracking results into a single fused Objects
// message, publishes it, and feeds the frame statistics behind the fps log.
void CameraAdapter::send_tracked_objects(const builtin_interfaces::msg::Time& tsms,
    std::unordered_map<std::string, builtin_interfaces::msg::Time>& tsmss,
    const std::unordered_map<std::string, std::vector<ObjectTrackInfoPtr>>& tracked)
{
    interface::msg::Objects fused;
    fused.header.stamp = tsms;
    fused.header.frame_id = "cameraFused";

    for (const auto& cam_entry : tracked) {
        const auto& cam_name = cam_entry.first;
        for (const auto& trk : cam_entry.second) {
            interface::msg::Object obj_msg;
            obj_msg.header.stamp = tsmss[cam_name];
            obj_msg.header.frame_id = cam_name;

            obj_msg.set__source(1); // source 1 == camera
            // A per-camera offset (currently always 0) would keep track ids
            // from different cameras disjoint.
            const auto idoffset = 0;
            obj_msg.set__id(idoffset + trk->tracker.track_id);

            // Map the detector type id onto the message enum; fall back to
            // UNKNOWN for unmapped types.
            const auto type_it = objectTypeMapBevdetType.find(ObjectType(trk->detect.type_id));
            obj_msg.set__type(static_cast<int>((type_it != objectTypeMapBevdetType.end()) ? type_it->second : ObjectMsgType::UNKNOWN));

            obj_msg.set__x(trk->tracker.x);
            obj_msg.set__y(trk->tracker.y);
            obj_msg.set__z(0);
            obj_msg.set__heading(trk->tracker.hea);
            obj_msg.set__speedx(trk->tracker.speedx);
            obj_msg.set__speedy(trk->tracker.speedy);
            // Fixed nominal extents — not estimated from the detection.
            obj_msg.set__length(5);
            obj_msg.set__width(2);
            obj_msg.set__height(1.5);
            obj_msg.extend.feature = trk->detect.object_feature;

            fused.objects.emplace_back(std::move(obj_msg));
        }
    }

    objects_pub_->publish(fused);

    gFrameStatistics.receivedOneFrame();
    RCLCPP_INFO(logger_, "send objects %lu fps:%f", fused.objects.size(), gFrameStatistics.fps());
}

// void CameraAdapter::convert_uv2xy(const cv::Matx<double, 3, 3> &K, const cv::Matx<double, 1, 5> &distort,
//                             const std::vector<cv::Point2f> &im_pts, std::vector<cv::Point2f> &ground_pts)
// {
//     std::vector<cv::Point2f> undistorted;
//     cv::undistortPoints(im_pts, undistorted, K, distort);
//     cv::perspectiveTransform(undistorted, ground_pts, cam2ground_);
// }
