///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2025, STEREOLABS.
//
// All rights reserved.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////

// ZED include
#include "ClientPublisher.hpp"
#include "GLViewer.hpp"
#include "utils.hpp"

// Entry point: drives a multi-camera ZED Fusion pipeline.
//   argv[1] : mandatory JSON configuration file (generated with the ZED360 tool)
//   argv[2] : optional custom ONNX YOLO model enabling custom object detection
// Returns EXIT_SUCCESS on a clean shutdown, EXIT_FAILURE on any setup error.
int main(int argc, char **argv) {
    std::string configuration_json, optional_custom_onnx_yolo_model;

    // A .json configuration file is required as the first argument.
    if (argc < 2 || (argc == 2 && !hasExtension(std::string(argv[1]), ".json"))) {
        // this file should be generated by using the tool ZED360
        std::cout << "Need a Configuration file in input" << std::endl;
        return EXIT_FAILURE;
    }

    configuration_json = std::string(argv[1]);
    if (argc > 2 && hasExtension(std::string(argv[2]), ".onnx")) {
        optional_custom_onnx_yolo_model = std::string(argv[2]);
        std::cout << "Enabling custom ONNX model with " << optional_custom_onnx_yolo_model << std::endl;
    }

    // Defines the Coordinate system and unit used in this sample.
    // These must match between the cameras and the fusion module.
    constexpr sl::COORDINATE_SYSTEM COORDINATE_SYSTEM = sl::COORDINATE_SYSTEM::RIGHT_HANDED_Y_UP;
    constexpr sl::UNIT UNIT = sl::UNIT::METER;

    // Read json file containing the configuration of your multi-camera setup.
    auto configurations = sl::readFusionConfigurationFile(configuration_json.c_str(), COORDINATE_SYSTEM, UNIT);

    if (configurations.empty()) {
        std::cout << "Empty configuration File." << std::endl;
        return EXIT_FAILURE;
    }

    // Shared trigger used to synchronize the grab of all locally-run cameras.
    Trigger trigger;

    // Check if the ZED camera should run within the same process or if they are running on the edge.
    std::vector<ClientPublisher> clients(configurations.size());
    int id_ = 0;
    std::map<int, std::string> svo_files; // client index -> SVO path, for SVO synchronization below
    for (const auto &conf : configurations) { // by const-ref: avoids copying each configuration
        // if the ZED camera should run locally, then start a thread to handle it
        if (conf.communication_parameters.getType() == sl::CommunicationParameters::COMM_TYPE::INTRA_PROCESS) {
            std::cout << "Try to open ZED " << conf.serial_number << ".." << std::endl;
            clients[id_].optional_custom_onnx_yolo_model = optional_custom_onnx_yolo_model;
            auto state = clients[id_].open(conf.input_type, &trigger);
            if (!state) {
                // NOTE: id_ is intentionally not incremented here so the next
                // camera reuses this client slot.
                std::cerr << "Could not open ZED: " << conf.input_type.getConfiguration() << ". Skipping..." << std::endl;
                continue;
            }

            // Remember SVO inputs so their playback positions can be aligned.
            if (conf.input_type.getType() == sl::InputType::INPUT_TYPE::SVO_FILE)
                svo_files.insert(std::make_pair(id_, conf.input_type.getConfiguration()));

            std::cout << ". ready !" << std::endl;

            id_++;
        }
    }

    // Synchronize SVO files in SVO mode: align every recording on a common
    // starting frame so the fusion receives temporally-consistent data.
    bool enable_svo_sync = (svo_files.size() > 1);
    if (enable_svo_sync) {
        std::cout << "Starting SVO sync process..." << std::endl;
        std::map<int, int> cam_idx_to_svo_frame_idx = syncDATA(svo_files);

        for (auto &it : cam_idx_to_svo_frame_idx) {
            std::cout << "Setting camera " << it.first << " to frame " << it.second << std::endl;
            clients[it.first].setStartSVOPosition(it.second);
        }
    }

    // start camera threads
    for (auto &it : clients)
        it.start();

    // Now that the ZED cameras are running, we need to initialize the fusion module
    sl::InitFusionParameters init_params;
    init_params.coordinate_units = UNIT;
    init_params.coordinate_system = COORDINATE_SYSTEM;
    init_params.verbose = true;

    // create and initialize it
    sl::Fusion fusion;
    fusion.init(init_params);

    // subscribe to every camera of the setup to internally gather their data
    std::vector<sl::CameraIdentifier> cameras;
    for (auto &it : configurations) {
        sl::CameraIdentifier uuid(it.serial_number);
        // to subscribe to a camera you must give its serial number, the way to communicate with it (shared memory or local network), and its world pose in the setup.
        auto state = fusion.subscribe(uuid, it.communication_parameters, it.pose, it.override_gravity);
        if (state != sl::FUSION_ERROR_CODE::SUCCESS)
            std::cout << "Unable to subscribe to " << std::to_string(uuid.sn) << " . " << state << std::endl;
        else
            cameras.push_back(uuid);
    }

    // check that at least one camera is connected
    if (cameras.empty()) {
        std::cout << "no connections " << std::endl;
        return EXIT_FAILURE;
    }

    fusion.enablePositionalTracking();

    // Enable object detection
    sl::ObjectDetectionFusionParameters od_fusion_params;
    od_fusion_params.enable_tracking = false; // We do tracking per camera then fuse only in fusion.
                                              // This takes more resources than tracking in Fusion only but yields more accurate tracking.
    std::cout << "Enabling Fused Object detection" << std::endl;
    const sl::FUSION_ERROR_CODE err = fusion.enableObjectDetection(od_fusion_params);
    if (err != sl::FUSION_ERROR_CODE::SUCCESS) {
        std::cout << "Error: " << err << std::endl;
        return EXIT_FAILURE;
    }

    // creation of a 3D viewer
    GLViewer viewer;
    viewer.init(argc, argv, cameras);

    std::cout << "Viewer Shortcuts\n"
            << "\t- 'q': quit the application\n"
            << "\t- 'p': play/pause the GLViewer\n"
            << "\t- 'f': switch on/off for fused bbox display\n"
            << "\t- 'r': switch on/off for raw bbox display\n"
            << "\t- 's': switch on/off for live point cloud display\n"
            << "\t- 'c': switch on/off point cloud display with raw color\n"
            << std::endl;

    // fusion outputs, hoisted out of the loop so buffers are reused each frame
    std::unordered_map<sl::String /*Group name*/, sl::Objects> fused_objects;
    std::unordered_map<sl::CameraIdentifier, std::unordered_map<unsigned int /* instance id */, sl::Objects>> camera_raw_data;
    sl::FusionMetrics metrics;
    std::map<sl::CameraIdentifier, sl::Mat> views;
    std::map<sl::CameraIdentifier, sl::Mat> pointClouds;
    sl::Resolution low_res(512, 360); // reduced resolution keeps retrieval/display cheap

    // run the fusion as long as the viewer is available.
    while (viewer.isAvailable()) {
        // Wake up the camera threads so they grab a new frame.
        trigger.notifyZED();

        // run the fusion process (which gather data from all camera, sync them and process them)
        if (fusion.process() == sl::FUSION_ERROR_CODE::SUCCESS) {
            // Retrieve all the fused objects
            fusion.retrieveObjects(fused_objects);
            // for debug, you can retrieve the data sent by each camera
            for (const sl::CameraIdentifier& id : cameras) {
                // Retrieve all the raw objects of a given camera
                fusion.retrieveObjects(camera_raw_data[id], id);
                sl::Pose pose;
                if (fusion.getPosition(pose, sl::REFERENCE_FRAME::WORLD, id) == sl::POSITIONAL_TRACKING_STATE::OK)
                    viewer.setCameraPose(id.sn, pose.pose_data);

                auto state_view = fusion.retrieveImage(views[id], id, low_res);
                auto state_pc = fusion.retrieveMeasure(pointClouds[id], id, sl::MEASURE::XYZBGRA, low_res);

                // Only push a camera to the viewer when both image and point cloud are valid.
                if (state_view == sl::FUSION_ERROR_CODE::SUCCESS && state_pc == sl::FUSION_ERROR_CODE::SUCCESS) {
                    viewer.updateCamera(id.sn, views[id], pointClouds[id]);
                }
            }

            // get metrics about the fusion process for monitoring purposes
            fusion.getProcessMetrics(metrics);
        }

        // Update the viewer with the fused objects and the raw objects
        viewer.updateObjects(fused_objects, camera_raw_data, metrics);

        // Paused: idle until playback resumes or the viewer closes.
        while (!viewer.isPlaying() && viewer.isAvailable())
            sl::sleep_ms(10);
    }

    viewer.exit();

    // Stop the trigger and wake the camera threads one last time so they can
    // observe running == false and exit their grab loops.
    trigger.running = false;
    trigger.notifyZED();

    for (auto &it : clients)
        it.stop();

    fusion.close();

    return EXIT_SUCCESS;
}
