//
// Created by weeksun on 17-4-25.
//

#include "driver.h"
#include <exception>
using namespace camport;
using namespace std;
// Camera on/off command, written by the /camera_control subscriber callback
// and read in the capture loop of onInitCamera():
//   2 = initial "stop" request, 1 = open request, 0 = idle / already handled.
// NOTE(review): accessed from the ROS callback thread and the capture thread
// without synchronization — presumably benign for a flag, but confirm.
int camera_control = 2;

// Tracks whether the camera is currently considered "open" by the
// camera-control state machine in the capture loop.
bool camera_control_judge = true;

// 20 MiB scratch buffer used as the destination for TYRegisterWorldToColor()
// in frameHandler() (file-local; distinct from the buffer_ member).
static char buffer[1024*1024*20];

// Default constructor. Intentionally empty: all device and ROS setup is
// deferred to onInit(), which spawns onInitCamera() on a worker thread.
DriverNodelet::DriverNodelet(){

}

// Destructor. NOTE(review): init_thread_ is neither joined nor interrupted
// here; the capture loop in onInitCamera() exits only via ros::ok() — confirm
// shutdown ordering is safe.
DriverNodelet::~DriverNodelet (){

}

// Subscriber callback for the /camera_control topic: stores the commanded
// state (see camera_control above) for the capture loop to act on.
void DriverNodelet::cameraControl(std_msgs::Int32 msg){
  camera_control = msg.data;
}

// Nodelet entry point. Device initialization blocks (waiting for hardware),
// so it is pushed onto a worker thread to keep the nodelet manager responsive.
void DriverNodelet::onInit (){
    init_thread_ = boost::thread(boost::bind(&DriverNodelet::onInitCamera, this));
}

// Worker-thread body: opens the first Percipio device, configures its
// components (point3d / RGB / depth), advertises the ROS topics, then runs
// the frame-fetch loop until ROS shuts down.
//
// Fix: frame buffers allocated with new[] were released with scalar delete
// (undefined behavior); now released with delete[].
void DriverNodelet::onInitCamera(){
    ros::NodeHandle &nh = getNodeHandle();        // topics
    ros::NodeHandle &param_nh = getPrivateNodeHandle(); // parameters

    // Allow remapping namespaces rgb, ir, depth, depth_registered
    image_transport::ImageTransport it(nh);
    ros::NodeHandle rgb_nh(nh, "rgb");
    image_transport::ImageTransport rgb_it(rgb_nh);
    ros::NodeHandle ir_nh(nh, "ir");
    image_transport::ImageTransport ir_it(ir_nh);
    ros::NodeHandle depth_nh(nh, "depth");
    image_transport::ImageTransport depth_it(depth_nh);
    ros::NodeHandle depth_registered_nh(nh, "depth_registered");
    image_transport::ImageTransport depth_registered_it(depth_registered_nh);

    rgb_frame_counter_ = depth_frame_counter_ = ir_frame_counter_ = 0;
    publish_rgb_ = publish_ir_ = publish_depth_ = true;

    // Camera TF frames
    param_nh.param("rgb_frame_id", rgb_frame_id_, std::string("/camport_rgb_optical_frame"));
    param_nh.param("depth_frame_id", depth_frame_id_, std::string("/camport_depth_optical_frame"));
    NODELET_INFO("rgb_frame_id = '%s' ", rgb_frame_id_.c_str());
    NODELET_INFO("depth_frame_id = '%s' ", depth_frame_id_.c_str());

    // Fallback depth intrinsics used when the device does not report its own.
    double fx,fy,cx,cy;
    param_nh.param("depth_camera_fx",fx,564.354);
    param_nh.param("depth_camera_fy",fy,564.354);
    param_nh.param("depth_camera_cx",cx,318.766);
    param_nh.param("depth_camera_cy",cy,254.873);

    default_depth_camera_intr_.data[0] = fx;
    default_depth_camera_intr_.data[4] = fy;
    default_depth_camera_intr_.data[2] = cx;
    default_depth_camera_intr_.data[5] = cy;
    default_depth_camera_intr_.data[8] = 1;

    NODELET_INFO("default depth camera intrinsic:");
    NODELET_INFO("          %f 0.0 %f",default_depth_camera_intr_.data[0],
                                       default_depth_camera_intr_.data[2]);
    NODELET_INFO("          0.0 %f %f",default_depth_camera_intr_.data[4],
                                       default_depth_camera_intr_.data[5]);
    NODELET_INFO("          %f %f %f",default_depth_camera_intr_.data[6],
                                       default_depth_camera_intr_.data[7],
                                        default_depth_camera_intr_.data[8]);

    param_nh.param("old_device",old_device_,false);

    TY_DEV_HANDLE hDevice;
    LOGD("=== Init lib");
    ASSERT_OK( TYInitLib() );
    TY_VERSION_INFO* pVer = (TY_VERSION_INFO*)buffer_;
    ASSERT_OK( TYLibVersion(pVer) );
    LOGD("     - lib version: %d.%d.%d", pVer->major, pVer->minor, pVer->patch);

    LOGD("=== Get device info");
    ASSERT_OK( TYGetDeviceNumber(&n_) );
    LOGD("     - device number %d", n_);

    TY_DEVICE_BASE_INFO* pBaseInfo = (TY_DEVICE_BASE_INFO*)buffer_;

    // Poll until at least one device is connected (or ROS shuts down).
    while(ros::ok()){
      ASSERT_OK( TYGetDeviceList(pBaseInfo, 100, &n_) );

      if(n_ == 0){
        LOGD("=== No device got");
        sleep(1);
      }else{
        break;
      }
    }

    LOGD("=== Open device 0");
    ASSERT_OK( TYOpenDevice(pBaseInfo[0].id, &hDevice) );

    if(developer_mode){
      LOGD("=== Enter Developer Mode");
      ASSERT_OK(TYEnterDeveloperMode(hDevice));
    }

    int32_t allComps=0;
    ASSERT_OK( TYGetComponentIDs(hDevice, &allComps) );

    // Enable the on-device point cloud component when available (new devices).
    if((allComps & TY_COMPONENT_POINT3D_CAM) && !old_device_){
      LOGD("=== Configure components, open point3d cam");
      int32_t componentIDs = TY_COMPONENT_POINT3D_CAM;
      ASSERT_OK( TYEnableComponents(hDevice, componentIDs) );
      has_point3d_ = true;
    }

    if(allComps & TY_COMPONENT_RGB_CAM){
      // Enable the RGB camera only if its intrinsics can be read; otherwise
      // fall back to the default depth intrinsics for the RGB camera info.
      int err = TYGetStruct(hDevice, TY_COMPONENT_RGB_CAM,
                            TY_STRUCT_CAM_INTRINSIC, (void*)&rgb_camera_intr_, sizeof(rgb_camera_intr_));
      if(err != TY_STATUS_OK){
          LOGE("Get camera RGB intrinsic failed: %s", TYErrorString(err));
          rgb_camera_intr_ = default_depth_camera_intr_;
      } else {
          has_color_ = true;
          LOGD("=== Has RGB camera, open RGB cam");
          ASSERT_OK( TYEnableComponents(hDevice, TY_COMPONENT_RGB_CAM) );
      }

      LOGD("=== Configure feature, set rgb resolution to 640x480.");
      // Some devices do not permit changing the mode; that is acceptable.
      TY_STATUS errs = TYSetEnum(hDevice, TY_COMPONENT_RGB_CAM, TY_ENUM_IMAGE_MODE, TY_IMAGE_MODE_640x480);
      ASSERT(errs == TY_STATUS_OK || errs == TY_STATUS_NOT_PERMITTED);
    }

    if(allComps & TY_COMPONENT_DEPTH_CAM){
      int err = TYGetStruct(hDevice, TY_COMPONENT_DEPTH_CAM,
                            TY_STRUCT_CAM_INTRINSIC,
                            (void*)&depth_camera_intr_, sizeof(depth_camera_intr_));
      if(err != TY_STATUS_OK){
          LOGE("Get camera depth intrinsic failed: %s", TYErrorString(err));
          has_depth_intrinsic_ = false;
          depth_camera_intr_ = default_depth_camera_intr_;
      } else {
          has_depth_intrinsic_ = true;
      }
      has_depth_ = true;
      LOGD("=== Has depth camera, open depth cam");
      ASSERT_OK( TYEnableComponents(hDevice, TY_COMPONENT_DEPTH_CAM) );
    }

    // RGB distortion + intrinsics for the undistortion step in frameHandler().
    TY_STATUS ret = TYGetStruct(hDevice, TY_COMPONENT_RGB_CAM, TY_STRUCT_CAM_DISTORTION, &rgb_camera_dist_, sizeof(rgb_camera_dist_));
    ret |= TYGetStruct(hDevice, TY_COMPONENT_RGB_CAM, TY_STRUCT_CAM_INTRINSIC, &rgb_camera_intr_, sizeof(rgb_camera_intr_));
    if(ret!=TY_STATUS_OK){
      LOGD("Get camera distorion failed");
    }
    LOGD("=== Configure feature, set depth resolution to 640x480.");
    LOGD("Note: DM460 resolution feature is in component TY_COMPONENT_DEVICE,");
    LOGD("      other device may lays in some other components.");
    TY_STATUS err = TYSetEnum(hDevice, TY_COMPONENT_DEPTH_CAM, TY_ENUM_IMAGE_MODE, TY_IMAGE_MODE_640x480);
    ASSERT(err == TY_STATUS_OK || err == TY_STATUS_NOT_PERMITTED);

    LOGD("=== Prepare image buffer");
    int32_t frameSize;
    ASSERT_OK( TYGetFrameBufferSize(hDevice, &frameSize) );
    LOGD("     - Get size of framebuffer, %d", frameSize);
    ASSERT( frameSize >= 640*480*2 );

    // Double-buffer: the SDK pops one while we process the other.
    LOGD("     - Allocate & enqueue buffers");
    char* frameBuffer[2];
    frameBuffer[0] = new char[frameSize];
    frameBuffer[1] = new char[frameSize];
    LOGD("     - Enqueue buffer (%p, %d)", frameBuffer[0], frameSize);
    ASSERT_OK( TYEnqueueBuffer(hDevice, frameBuffer[0], frameSize) );
    LOGD("     - Enqueue buffer (%p, %d)", frameBuffer[1], frameSize);
    ASSERT_OK( TYEnqueueBuffer(hDevice, frameBuffer[1], frameSize) );

    // Frames are fetched synchronously below instead of via TYRegisterCallback,
    // because SDK callbacks may block internal data receiving.
    DepthRender render;
    CallbackData cb_data;
    cb_data.index = 0;
    cb_data.hDevice = hDevice;
    cb_data.render = &render;

    LOGD("Publish camera topics");
    pub_depth_ = depth_it.advertiseCamera("image_raw",1);
    pub_depth_registered_ = depth_registered_it.advertiseCamera("image_registered",1);
    pub_rgb_ = rgb_it.advertiseCamera("image_raw",1);
    pub_p3d_y_ = depth_it.advertise("p3d_y",1);
    pub_point3d_ = depth_nh.advertise<sensor_msgs::PointCloud2>(
          "point3d",1);

    // Must stay alive for the duration of the capture loop below.
    ros::Subscriber camera_control_sub = nh.subscribe("/camera_control",20,&DriverNodelet::cameraControl,this);

    LOGD("=== Disable trigger mode");
    ASSERT_OK( TYSetBool(hDevice, TY_COMPONENT_DEVICE, TY_BOOL_TRIGGER_MODE, false) );

    LOGD("=== Start capture");
    ASSERT_OK( TYStartCapture(hDevice) );

    LOGD("=== While loop to fetch frame");

    TY_FRAME_DATA frame;

    while(ros::ok()){
        // Camera-control state machine. The actual TYStopCapture/TYStartCapture
        // calls are currently disabled; only the flags are toggled.
        if(camera_control == 2 && camera_control_judge == true){
           // ASSERT_OK( TYStopCapture(hDevice) );
           camera_control = 0;
           camera_control_judge = false;
        }

        if(camera_control == 1 && camera_control_judge == false){
           // ASSERT_OK( TYStartCapture(hDevice) );
           camera_control = 0;
           camera_control_judge = true;
        }

        // Block indefinitely for the next frame; skip on fetch errors.
        int err = TYFetchFrame(hDevice, &frame, -1);
        if( err != TY_STATUS_OK ){
            continue;
        }
        frameHandler(&frame, &cb_data);
        if(developer_mode){
          DEVELOPER_MODE_PRINT();
        }
    }

    ASSERT_OK( TYStopCapture(hDevice) );
    ASSERT_OK( TYCloseDevice(hDevice) );
    ASSERT_OK( TYDeinitLib() );
    // Arrays allocated with new[] must be released with delete[].
    delete[] frameBuffer[0];
    delete[] frameBuffer[1];

    LOGD("=== Main done!");
    return ;
}

// Back-project a 16-bit depth image into a 3-channel float point image
// using the stored depth-camera pinhole intrinsics. An empty input yields
// an empty output.
void DriverNodelet::depth2Point(const cv::Mat& depth,cv::Mat& p3d){

  if(depth.empty()){
    p3d = cv::Mat();
    return;
  }

  p3d = cv::Mat(depth.size(),CV_32FC3);

  // Hoist the intrinsic parameters out of the loop (fx, fy, cx, cy).
  const auto fx = depth_camera_intr_.data[0];
  const auto fy = depth_camera_intr_.data[4];
  const auto cx = depth_camera_intr_.data[2];
  const auto cy = depth_camera_intr_.data[5];

  const uint16_t* src = (const uint16_t*)depth.data;
  float* dst = (float*)p3d.data;

  for(int row = 0; row < depth.rows; ++row){
    for(int col = 0; col < depth.cols; ++col){
      const uint16_t z = src[row * depth.cols + col];
      float* out = dst + (row * p3d.cols + col) * 3;
      out[0] = z * (col - cx) / fx;  // X
      out[1] = z * (row - cy) / fy;  // Y
      out[2] = z;                    // Z (raw depth units)
    }
  }
}

// Process one captured frame: parse depth / point cloud, optionally undistort
// and publish RGB, project the point cloud into the color frame for a
// registered depth image, publish the raw depth, point-Y image and (via
// depth2Point) fill missing point clouds. The frame buffer is re-enqueued at
// the end, as the TY SDK requires.
void DriverNodelet::frameHandler(TY_FRAME_DATA* frame, void* userdata){
  CallbackData* pData = (CallbackData*) userdata;
  //LOGD("=== Get frame %d", ++pData->index);

  cv::Mat depth, color, p3d;
//  parseFrame(*frame, &depth, 0, 0, &color, &p3d);
  // NOTE(review): the color output slot is passed as 0 here, so `color`
  // remains empty and both `!color.empty()` branches below are currently
  // dead code — confirm whether color parsing should be re-enabled.
  parseFrame(*frame, &depth, 0, 0, 0, &p3d);
  ros::Time stamp = ros::Time::now();

//  if(!color.empty()){
//      publishRgbImage(color,stamp);
//      //cv::imshow("few",color);
//  }
  // Undistort the RGB image with the device-reported intrinsics/distortion,
  // resize it to the depth resolution and publish it.
  if(!color.empty()){
      cv::Mat undistort_result(color.size(), CV_8UC3);
      TY_IMAGE_DATA dst;
      dst.width = color.cols;
      dst.height = color.rows;
      dst.size = undistort_result.size().area() * 3;
      dst.buffer = undistort_result.data;
      dst.pixelFormat = TY_PIXEL_FORMAT_RGB;
      TY_IMAGE_DATA src;
      src.width = color.cols;
      src.height = color.rows;
      src.size = color.size().area() * 3;
      src.pixelFormat = TY_PIXEL_FORMAT_RGB;
      src.buffer = color.data;
      //undistort camera image
      //TYUndistortImage accept TY_IMAGE_DATA from TY_FRAME_DATA , pixel format RGB888 or MONO8
      //you can also use opencv API cv::undistort to do this job.
      ASSERT_OK(TYUndistortImage(&rgb_camera_intr_, &rgb_camera_dist_, NULL, &src, &dst));
      color = undistort_result;
      cv::Mat resizedColor;
      cv::resize(color, resizedColor, depth.size(), 0, 0, CV_INTER_LINEAR);
      //cv::imshow("color", resizedColor);
      if(!resizedColor.empty()){
       publishRgbImage(resizedColor,stamp);
      }
  }

  // do Registration: project the world-space point cloud into the color
  // camera to build a color-registered depth image (written into the
  // file-scope 20 MiB `buffer`), fill holes with a median filter, then
  // publish on the depth_registered topic.
  cv::Mat newDepth;
  if(!p3d.empty() && !color.empty()) {
    ASSERT_OK( TYRegisterWorldToColor(pData->hDevice, (TY_VECT_3F*)p3d.data, 0
                , p3d.cols * p3d.rows, (uint16_t*)buffer, sizeof(buffer)
                ));
    newDepth = cv::Mat(color.rows, color.cols, CV_16U, (uint16_t*)buffer);
    cv::Mat resized_color;
    cv::Mat temp;
    //you may want to use median filter to fill holes in projected depth image or do something else here
    cv::medianBlur(newDepth,temp,5);
    newDepth = temp;
    //resize to the same size for display
    cv::resize(newDepth, newDepth, depth.size(), 0, 0, 0);
    cv::resize(color, resized_color, depth.size());
    cv::Mat depthColor = pData->render->Compute(newDepth);
    depthColor = depthColor / 2 + resized_color / 2;

    //cv::imshow("projected depth", depthColor);
    if(!depthColor.empty()){
      publishDepthImage(newDepth,stamp,true);
    }
  }

  // Publish raw depth; derive the point cloud from depth when the device
  // has no on-board point3d component.
  if(!depth.empty()){

    publishDepthImage(depth,stamp,false);

    if(!has_point3d_){
      depth2Point(depth,p3d);
    }
  }

  // Publish the Y channel of the point cloud as a signed 16-bit image.
  if(!p3d.empty()){
    cv::Mat point3d(p3d.rows,p3d.cols,CV_16SC1);
    for(int i = 0;i<p3d.rows;i++){
        for(int j=0;j<p3d.cols;j++){
          //std::cout<<p3d.at<cv::Vec3f>(i,j)[1]<<","<<std::endl;
            point3d.at<short>(i,j) = (int)p3d.at<cv::Vec3f>(i,j)[1];
        }
    }
    sensor_msgs::ImagePtr msg_point3d = cv_bridge::CvImage(std_msgs::Header(), "16SC1", point3d).toImageMsg();
    pub_p3d_y_.publish(msg_point3d);
   //publishPoint3d(p3d,stamp);
  }

//  cv::waitKey(1);

  // Give the frame buffer back to the SDK so it can be reused.
  //LOGD("=== Callback: Re-enqueue buffer(%p, %d)", frame->userBuffer, frame->bufferSize);
  ASSERT_OK( TYEnqueueBuffer(pData->hDevice, frame->userBuffer, frame->bufferSize) );
}


// Publish one BGR8 color frame together with its matching camera info.
void DriverNodelet::publishRgbImage(const cv::Mat& iFrame, ros::Time time) const {

  std_msgs::Header hdr;
  hdr.stamp = time;
  hdr.frame_id = rgb_frame_id_;

  cv_bridge::CvImage bridged(hdr, sensor_msgs::image_encodings::BGR8, iFrame);
  sensor_msgs::CameraInfoPtr info = getRGBCameraInfo(iFrame.cols, iFrame.rows, time);

  pub_rgb_.publish(bridged.toImageMsg(), info);
}

// Publish a 16-bit depth image. With reg=true it goes to the registered
// topic paired with the RGB camera info (it is aligned to the color frame);
// otherwise to the raw depth topic with the depth camera info.
void DriverNodelet::publishDepthImage(const cv::Mat &iFrame, ros::Time time, bool reg) const{

    std_msgs::Header hdr;
    hdr.stamp = time;
    hdr.frame_id = depth_frame_id_;

    cv_bridge::CvImage bridged(hdr, sensor_msgs::image_encodings::MONO16, iFrame);
    sensor_msgs::ImagePtr msg = bridged.toImageMsg();

    if(reg){
        pub_depth_registered_.publish(
            msg, getRGBCameraInfo(iFrame.cols, iFrame.rows, time));
    }else{
        pub_depth_.publish(
            msg, getDepthCameraInfo(iFrame.cols, iFrame.rows, time));
    }
}


// Convert a 3-channel float point image (millimeters) into an organized
// PCL cloud (meters) and publish it as sensor_msgs::PointCloud2.
//
// Fixes: removed an unused heap-allocated PointCloud::Ptr and an unused
// counter, reserve the point vector up front (one allocation instead of
// repeated reallocations for ~width*height push_backs), and dropped the
// large blocks of dead commented-out filtering code.
void DriverNodelet::publishPoint3d(const cv::Mat& iFrame,ros::Time time) const{

    pcl::PointCloud<pcl::PointXYZ> pc;
    pc.header.frame_id = depth_frame_id_;
    pc.header.stamp = pcl_conversions::toPCL(time);
    pc.is_dense=false;
    pc.width = (iFrame.cols);
    pc.height = iFrame.rows;
    // Push into pc.points directly (not pc.push_back) so the organized
    // width/height set above are preserved.
    pc.points.reserve(static_cast<size_t>(iFrame.rows) * iFrame.cols);

    for(int i=0;i<iFrame.rows;i++){
      for(int j=0;j<(iFrame.cols);j++){
        cv::Vec3f p = iFrame.at<cv::Vec3f>(i,j);
        p /= 1000.0;  // millimeters -> meters
        pc.points.push_back(pcl::PointXYZ(p[0],p[1],p[2]));
      }
    }

    // Convert to ROS data type and publish.
    sensor_msgs::PointCloud2 output;
    pcl::toROSMsg(pc,output);
    pub_point3d_.publish(output);
}

// Build a CameraInfo message for the RGB stream from the stored intrinsics.
// Distortion is reported as zero (plumb_bob) because the published image has
// already been undistorted.
//
// Fix: removed the redundant per-element D[0..4]=0 assignments — resize(5, 0.0)
// already zero-fills the coefficients.
sensor_msgs::CameraInfoPtr DriverNodelet::getRGBCameraInfo(int width, int height,ros::Time stamp) const {
    sensor_msgs::CameraInfoPtr info = boost::make_shared<sensor_msgs::CameraInfo>();
    info->header.frame_id = rgb_frame_id_;
    info->header.stamp = stamp;
    info->width  = width;
    info->height = height;

    // No distortion
    info->distortion_model = sensor_msgs::distortion_models::PLUMB_BOB;
    info->D.resize(5, 0.0);

    // Camera matrix K (3x3, row-major)
    info->K.assign(0.0);
    info->K[0] = rgb_camera_intr_.data[0]; // fx
    info->K[4] = rgb_camera_intr_.data[4]; // fy
    info->K[2] = rgb_camera_intr_.data[2]; // cx
    info->K[5] = rgb_camera_intr_.data[5]; // cy
    info->K[8] = 1.0;

    // No separate rectified image plane, so R = I
    info->R.assign(0.0);
    info->R[0] = info->R[4] = info->R[8] = 1.0;

    // Then P = K(I|0) = (K|0), 3x4 row-major
    info->P.assign(0.0);
    info->P[0]  = rgb_camera_intr_.data[0]; // fx
    info->P[5]  = rgb_camera_intr_.data[4]; // fy
    info->P[2]  = rgb_camera_intr_.data[2]; // cx
    info->P[6]  = rgb_camera_intr_.data[5]; // cy
    info->P[10] = 1.0;

    return info;
}

// Build a CameraInfo message for the depth stream from the stored depth
// intrinsics (device-reported, or the parameter-file defaults if the device
// did not provide them). Distortion is reported as zero (plumb_bob).
//
// Fix: removed the redundant per-element D[0..4]=0 assignments — resize(5, 0.0)
// already zero-fills the coefficients.
sensor_msgs::CameraInfoPtr DriverNodelet::getDepthCameraInfo(int width, int height,ros::Time stamp) const{
  sensor_msgs::CameraInfoPtr info = boost::make_shared<sensor_msgs::CameraInfo>();
  info->header.frame_id = depth_frame_id_;
  info->header.stamp = stamp;
  info->width  = width;
  info->height = height;

  // No distortion
  info->distortion_model = sensor_msgs::distortion_models::PLUMB_BOB;
  info->D.resize(5, 0.0);

  // Camera matrix K (3x3, row-major)
  info->K.assign(0.0);
  info->K[0] = depth_camera_intr_.data[0]; // fx
  info->K[4] = depth_camera_intr_.data[4]; // fy
  info->K[2] = depth_camera_intr_.data[2]; // cx
  info->K[5] = depth_camera_intr_.data[5]; // cy
  info->K[8] = 1.0;

  // No separate rectified image plane, so R = I
  info->R.assign(0.0);
  info->R[0] = info->R[4] = info->R[8] = 1.0;

  // Then P = K(I|0) = (K|0), 3x4 row-major
  info->P.assign(0.0);
  info->P[0]  = depth_camera_intr_.data[0]; // fx
  info->P[5]  = depth_camera_intr_.data[4]; // fy
  info->P[2]  = depth_camera_intr_.data[2]; // cx
  info->P[6]  = depth_camera_intr_.data[5]; // cy
  info->P[10] = 1.0;

  return info;
}





// Register as nodelet
// NOTE(review): PLUGINLIB_DECLARE_CLASS is the deprecated pluginlib macro;
// newer ROS releases use PLUGINLIB_EXPORT_CLASS — confirm the target distro
// before migrating.
#include <pluginlib/class_list_macros.h>
PLUGINLIB_DECLARE_CLASS (camport, driver, camport::DriverNodelet, nodelet::Nodelet);
