/*******************************************************************************
* This class is used to open video files and camera devices. It uses ffmpeg
* libraries for video files and V4L2 and libdc1394 for camera devices.
* 
*
* To compile this code, you must add the libs -lavformat, -lavcodec, -lavutil,
* -lswscale, -ldc1394, -lraw1394 to the g++ linker command.
* 
*
* Licensed under GPL Version 3 license (/license.txt).
* Author: Eder A. Perez (eder.perez@ice.ufjf.br)
*******************************************************************************/

#include "cvvideo.h"
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

#include <linux/videodev2.h>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
}




/*********************************
*             ERRORS             *
**********************************/
#define CV_SUCCESS                         0x0000
#define CV_ERROR_FILE_NOT_FOUND            0x0001
/* Was 0x0006, which collided with CV_ERROR_CODEC_NOT_FOUND and made the two
   failures indistinguishable through errorcode. */
#define CV_ERROR_FILE_NOT_CREATED          0x0002
#define CV_ERROR_UNSUPPORTED_FORMAT        0x0004
#define CV_ERROR_VIDEO_STRM_NOT_FOUND      0x0005
#define CV_ERROR_CODEC_NOT_FOUND           0x0006
#define CV_ERROR_CODEC_NOT_OPENED          0x0007
#define CV_ERROR_FRAME_NOT_ALLOCATED       0x0008
#define CV_ERROR_STRM_INFO_NOT_FOUND       0x0009
#define CV_ERROR_CONVERT_FRAME_TO_RGB      0x000A
#define CV_ERROR_EOF                       0x000B
#define CV_ERROR_INPUT_OPENED              0x000C
#define CV_ERROR_CAMERA_NOT_OPENED         0x000D
#define CV_ERROR_NO_SIGNAL                 0x000E
#define CV_ERROR_INVALID_VIDEO_LOADING     0x000F
#define CV_ERROR_INCOMPATIBLE_DEVICE       0x0011
#define CV_WARNING_PRIORITY_NOT_SET        0x0100
#define CV_WARNING_SET_DATA_FORMAT_ERROR   0x0200


/* Bytes per image row, padded up to a 32-bit (4-byte) boundary:
   ceil(width*bpp / 32) * 32 bits, expressed in bytes. */
#define ROWSIZE(width, bpp) ( ((((width) * (bpp)) + 31) & ~31) >> 3 )
/* Clamp an arithmetic value to the [0, 255] byte range. */
#define BYTERANGE(a)( ( (a) > 255 )? 255 : ( ( (a) < 0 )? 0 : (a) ) )



/*********************************
*                                *
*     METHODS IMPLEMENTATION     *
*                                *
**********************************/

// Constructor
// Zero-initializes the members that the rest of the class tests against
// NULL/0 before use (ffmpeg contexts, frame pointers, buffer, geometry).
// NOTE(review): input_type, read_type, fd, bpp, rowsize and buffsize are NOT
// initialized here and are only assigned by the open*() methods -- confirm
// no code path reads them before a successful open.
CVVideo::CVVideo(): errorcode(0), format_ctx(NULL), codec_ctx(NULL), input_opened(false),
frame(NULL), frameRGB(NULL), buffer(NULL), width(0), height(0),
bitrate(0), fps(0) {
  
}


// Destructor
// Releases whatever resources the successful open*() call acquired.
CVVideo::~CVVideo() {

  // Nothing was ever opened: there is nothing to release. This also keeps
  // the switch below from reading input_type before it was assigned.
  if(!input_opened) return;

  switch(this->input_type) {

    case CV_INPUT_V4L2:
      // BUG FIX: the original used `=` instead of `==`, so the streaming
      // branch was always taken and a read()-mode malloc'd buffer was
      // handed to munmap() instead of free().
      if(read_type == V4L2_CAP_STREAMING) { // Used streaming I/O
        // Stop the capture stream before unmapping the driver's buffer
        int t = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        ioctl(fd, VIDIOC_STREAMOFF, &t);
        munmap(buffer, buffsize);
      }
      else free(buffer); // Used read() function

      close(fd);
      break;

    case CV_INPUT_IEEE1394:
      // Release the buffer
      dc1394_capture_enqueue(camera, frame_1394);

      // Stop transmission
      dc1394_video_set_transmission(camera, DC1394_OFF);

      // Stop capture
      dc1394_capture_stop(camera);
      break;

    case CV_INPUT_VIDEOFILE:
      // Free the RGB image buffer and its AVFrame wrapper
      free(buffer);
      av_free(frameRGB);

      // Free the YUV frame
      av_free(frame);

      // Close the codec
      avcodec_close(codec_ctx);

      // Close the video file
      av_close_input_file(format_ctx);
      break;

    default: break;

  }

}



/** Copies the current frame into the caller-supplied buffer.
*
* Dispatches to whichever grabbing routine was installed by the open*()
* method that succeeded (V4L2 read(), V4L2 streaming, video file, ...).
*
* @params:
*   data: destination array of bytes; must hold one full frame.
*
* @return: true on success, false otherwise.
*/
bool CVVideo::getFrame(CVubyte* data) {
  // Indirect call through the member-function pointer chosen at open time.
  bool (CVVideo::*grabber)(CVubyte*) = this->get_frame_ptr;
  return (this->*grabber)(data);
}



/* Open a v4l2 device. It tries to set device priority to highest and also retrieves
* camera's capabilities (stored in capability private attribute).
* @params:
*   device: path of device (e.g. /dev/video0)
*   params: used to set camera parameters
*
* @return: returns true if device was opened, false otherwise.
*       
*/
bool CVVideo::openV4L2(const char* device, CVv4l2params_t params) {
  
  // Try to open a camera
    // O_NONBLOCK flag doesn't block read() and VIDIOC_DQBUF ioctl
    // O_RDWR flag blocks read() and VIDIOC_DQBUF ioctl until data become available
  fd = open(device, O_RDWR);
  if( fd == -1) {
    errorcode = CV_ERROR_CAMERA_NOT_OPENED;
    return false;
  }
  
  
  // Try to set priority to highest (only this fd can change device properties)
  if( ioctl(fd, VIDIOC_S_PRIORITY, V4L2_PRIORITY_RECORD) == -1 )
    errorcode = CV_WARNING_PRIORITY_NOT_SET;
 
 
  // Get device information
  memset(&capability, 0, sizeof(v4l2_capability));
  if( ioctl(fd, VIDIOC_QUERYCAP, &capability) == -1 ) {
    errorcode = CV_ERROR_INCOMPATIBLE_DEVICE;
    close(fd);
    return false;
  }
 
  // If device can't capture video, abort
  if( !(capability.capabilities & V4L2_CAP_VIDEO_CAPTURE) ) {
    errorcode = CV_ERROR_INCOMPATIBLE_DEVICE;
    close(fd);
    return false;
  }
  
  // If device is offline, abort
  memset(&input, 0, sizeof(v4l2_input));
  ioctl(fd, VIDIOC_G_INPUT, &(input.index));
  ioctl(fd, VIDIOC_ENUMINPUT, &input);
  if( (input.status & V4L2_IN_ST_NO_POWER) || (input.status & V4L2_IN_ST_NO_SIGNAL) ) {
    errorcode = CV_ERROR_NO_SIGNAL;
    close(fd);
    return false;
  }
  
  
  // Set parameters and returns
  input_opened = true;
  return init_v4l2_camera(params);
	
}



/* Set the v4l2 device parameters
* @params:
*   params: used to set camera parameters
*
* @return: returns true in success, false otherwise.
*
*/
bool CVVideo::init_v4l2_camera(CVv4l2params_t params) {

  // Data format negotiation: start from the driver's current format and
  // override only the fields the caller specified.
  struct v4l2_format format;
  memset(&format, 0, sizeof(v4l2_format));
  format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  ioctl(fd, VIDIOC_G_FMT, &format);

  // Try to set user's specific format
  if(params.width  != 0) format.fmt.pix.width  = (unsigned int) params.width;
  if(params.height != 0) format.fmt.pix.height = (unsigned int) params.height;
  if(params.device_fmt != CV_V4L2_DEVFMT_DEFAULT) format.fmt.pix.pixelformat = params.device_fmt;
  // BUG FIX: the original self-assigned this->retrieve_fmt (a no-op), so the
  // retrieval format requested by the caller was never recorded.
  this->retrieve_fmt = params.retrieve_fmt;

  if( ioctl(fd, VIDIOC_S_FMT, &format) == -1 )
    errorcode = CV_WARNING_SET_DATA_FORMAT_ERROR;

  // Read back what the driver actually accepted (it may adjust our request)
  ioctl(fd, VIDIOC_G_FMT, &format);
  this->width       = format.fmt.pix.width;
  this->height      = format.fmt.pix.height;
  this->bpp         = getBpp(format.fmt.pix.pixelformat);
  this->rowsize     = format.fmt.pix.bytesperline;
  this->buffsize    = format.fmt.pix.sizeimage;
  this->pixelformat = format.fmt.pix.pixelformat;


  // Streaming parameters (try to set fps).
  // BUG FIX: ioctl() takes the ADDRESS of the struct; the original passed
  // v4l2_streamparm by value, so these calls could never work.
  struct v4l2_streamparm streamparm;
  memset(&streamparm, 0, sizeof(v4l2_streamparm));
  streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  if(ioctl(fd, VIDIOC_G_PARM, &streamparm) != -1) {
    streamparm.parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
    if(params.fps > 0) { // avoid programming a 1/0 frame interval
      streamparm.parm.capture.timeperframe.numerator   = 1;
      streamparm.parm.capture.timeperframe.denominator = (unsigned int) params.fps;
    }
    streamparm.parm.capture.readbuffers = 2; // Use two buffers for read() mode
    ioctl(fd, VIDIOC_S_PARM, &streamparm);
    if(ioctl(fd, VIDIOC_G_PARM, &streamparm) != -1) { // Gets the final fps set
      // timeperframe is seconds per frame, so fps = denominator / numerator
      if(streamparm.parm.capture.timeperframe.numerator != 0)
        this->fps = streamparm.parm.capture.timeperframe.denominator /
                    streamparm.parm.capture.timeperframe.numerator;
    }
  }


  // Set retrieving frame function (it depends on the capabilities)
  // Use read() function
  if( capability.capabilities & V4L2_CAP_READWRITE) {
    this->buffer = (CVubyte*) malloc(sizeof(CVubyte)*this->buffsize);
    if(this->buffer == NULL) { // out of memory
      errorcode = CV_ERROR_INCOMPATIBLE_DEVICE;
      return false;
    }
    this->get_frame_ptr = &CVVideo::get_frame_v4l2read;
    this->read_type = V4L2_CAP_READWRITE;
  }
  else { // Use memory map
    // Ask the driver to allocate two capture buffers
    struct v4l2_requestbuffers requestbuffers;
    memset(&requestbuffers, 0, sizeof(v4l2_requestbuffers));
    requestbuffers.count  = 2;
    requestbuffers.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    requestbuffers.memory = V4L2_MEMORY_MMAP;
    if( ioctl(fd, VIDIOC_REQBUFS, &requestbuffers) == -1) {
      errorcode = CV_ERROR_INCOMPATIBLE_DEVICE;
      return false;
    }

    // Query the status of a buffer
    memset(&(this->bf), 0, sizeof(v4l2_buffer));
    this->bf.type  = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    this->bf.index = 0;
    if( ioctl(fd, VIDIOC_QUERYBUF, &(this->bf)) == -1) {
      errorcode = CV_ERROR_INCOMPATIBLE_DEVICE;
      return false;
    }

    // Map device memory into the buffer.
    // BUG FIX: mmap() reports failure with MAP_FAILED, which the original
    // never checked.
    this->buffsize = this->bf.length;
    this->buffer = (CVubyte*) mmap(NULL, bf.length, PROT_READ|PROT_WRITE, MAP_SHARED, fd, bf.m.offset);
    if(this->buffer == (CVubyte*) MAP_FAILED) {
      errorcode = CV_ERROR_INCOMPATIBLE_DEVICE;
      return false;
    }

    // Start streaming
    if( ioctl(fd, VIDIOC_STREAMON, &(requestbuffers.type)) == -1 ) {
      errorcode = CV_ERROR_INCOMPATIBLE_DEVICE;
      return false;
    }

    // Set getFrame() function
    this->get_frame_ptr = &CVVideo::get_frame_v4l2strm;
    this->read_type = V4L2_CAP_STREAMING;
  }

  //Set conversion pixel function
  set_conv_function();

  this->input_type = CV_INPUT_V4L2;
  return true;
}



/** Installs the pixel-format conversion routine used by the frame grabbers.
 *
 * Intended to choose convert_buffer_data from the device pixel format
 * (this->pixelformat) and the requested retrieval format (this->retrieve_fmt).
 * All concrete conversions are currently disabled (commented out below), so
 * convert_buffer_data is always left NULL and get_frame_v4l2read/strm fall
 * back to a raw memcpy of the device buffer.
 */
void CVVideo::set_conv_function() {
  // No conversion installed: getFrame() returns the raw device data.
  convert_buffer_data = NULL;
  
  //switch(this->pixelformat) {

    /****************************
    *  YUV DEVICE PIXEL FORMAT  *
    ****************************/
    /*case V4L2_PIX_FMT_YUYV:
      switch(this->retrieve_fmt) {
        case CV_RETRFMT_RGB24:
          convert_buffer_data = &CVVideo::YUYV_to_RGB24;
          break;
        case CV_RETRFMT_YUV24:
          break;
        case CV_RETRFMT_GREY8:
          convert_buffer_data = &CVVideo::YUYV_to_GREY24;
          break;
        default: break;
      }
      break;*/
    
    
    
    /****************************
    *  JPEG DEVICE PIXEL FORMAT *
    ****************************/
    /*case V4L2_PIX_FMT_PJPG:
      switch(this->retrieve_fmt) {
        case CV_FMT_RGB24:
          convert_buffer_data = &CVVideo::JPEG_to_rgb24;
          break;
        case CV_FMT_YUV:
        case CV_FMT_GREY:
          //convert_buffer_data = &CVVideo::JPEG_to_GREY;
          break;
      }
      break;*/
    
    
    //default: break;    
  //}

}



/* Gets frame from v4l2 device using read()
* @params:
*   data: array of bytes receiving one frame.
*
* @return: returns true in success, false otherwise.
*
*/
bool CVVideo::get_frame_v4l2read(CVubyte* data) {
  // Ask the driver for one complete frame.
  if( read(fd, this->buffer, this->buffsize) == -1 )
    return false;

  // A pixel-format converter was installed: let it fill `data` instead.
  if( convert_buffer_data != NULL )
    return (this->*convert_buffer_data)(data);

  // No conversion needed: hand the raw frame straight to the caller.
  memcpy(data, this->buffer, sizeof(CVubyte)*this->buffsize);
  return true;
}



/* Gets frame from v4l2 device using streaming (mmap'd buffer)
* @params:
*   data: array of bytes receiving one frame.
*
* @return: returns true in success, false otherwise.
*
*/
bool CVVideo::get_frame_v4l2strm(CVubyte* data) {

  // Hand the buffer back to the driver, then block until it is filled.
  // BUG FIX: the original ignored the ioctl return values, so a driver
  // failure was silently reported as a successful frame.
  if( ioctl(fd, VIDIOC_QBUF, &bf) == -1 ) return false;
  if( ioctl(fd, VIDIOC_DQBUF, &bf) == -1 ) return false;

  // If necessary to convert format data
  if(convert_buffer_data)
    return (this->*convert_buffer_data)(data);

  // No conversion installed: raw copy of the mapped frame.
  memcpy(data, this->buffer, sizeof(CVubyte)*this->buffsize);
  return true;

}










































/* Opens a video file
* @params:
*   path: path of the file.
*
* @return: returns true in success, false otherwise.
*       
*/
bool CVVideo::openVideoFile(const char* path) {
  
  // Return error if a file/device is already opened
  if(input_opened) {
    errorcode = CV_ERROR_INVALID_VIDEO_LOADING;
    return false;
  }

  
  // Register all formats and codecs
  av_register_all();
  
  // Open video file (autodetect file format, buffer size and format parameters)
  if(av_open_input_file(&format_ctx, path, NULL, 0, NULL) != 0) {
    errorcode = CV_ERROR_FILE_NOT_FOUND;
    return false;
  }
  
  // Retrieve stream information
  if(av_find_stream_info(format_ctx) < 0) {
    errorcode = CV_ERROR_STRM_INFO_NOT_FOUND;
    return false;
  }
  
  // Find the first video stream
  video_stream = -1;
  for(int i = 0; i < format_ctx->nb_streams; ++i)
      if(format_ctx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {
          video_stream = i;
          break;
      }

  if(video_stream == -1) {
    errorcode = CV_ERROR_VIDEO_STRM_NOT_FOUND;
    return false;
  }
  
  // Get a pointer to the codec context for the video stream
  codec_ctx = format_ctx->streams[video_stream]->codec;
  
  // Find the decoder for the video stream
  AVCodec *codec = avcodec_find_decoder(codec_ctx->codec_id);
  if(codec == NULL) {
    errorcode = CV_ERROR_CODEC_NOT_FOUND;
    return false; 
  }
  
  // Open codec
  if(avcodec_open(codec_ctx, codec) < 0) {
    errorcode = CV_ERROR_CODEC_NOT_OPENED;
    return false; 
  }
  
  // Hack to correct wrong frame rates that seem to be generated by some codecs
  if( (codec_ctx->time_base.num > 1000) && (codec_ctx->time_base.den == 1) )
    codec_ctx->time_base.den=1000;
    
  // Allocate video frame
  frame = avcodec_alloc_frame();
  if(frame == NULL) {
    errorcode = CV_ERROR_FRAME_NOT_ALLOCATED;
    return false;
  }
  
  // Allocate an AVFrame structure
  frameRGB = avcodec_alloc_frame();
  if(frameRGB == NULL) {
    errorcode = CV_ERROR_FRAME_NOT_ALLOCATED;
    return false;
  }
  
  // Determine required buffer size and allocate buffer
  int num_bytes = avpicture_get_size(PIX_FMT_RGB24, codec_ctx->width, codec_ctx->height);
  buffer = (CVubyte*) malloc(num_bytes);
  
  // Assign appropriate parts of buffer to image planes in frameRGB
  avpicture_fill((AVPicture*)frameRGB, buffer, PIX_FMT_RGB24, codec_ctx->width, codec_ctx->height);


  // Set attributes
  this->width   = codec_ctx->width;
  this->height  = codec_ctx->height;
  this->bpp     = 24; // Frames from files are always converted to 24bpp
  this->bitrate = format_ctx->bit_rate;
  this->fps = (float) 1.0f/(float)av_q2d(format_ctx->streams[video_stream]->time_base);
  
  // Set retrieving frame function
  this->get_frame_ptr = &CVVideo::get_frame_videofile;

  input_opened = true;
  return true;
}




/** Gets current frame from video files. Frames from video files are in RGB
* with 24bpp, written bottom-up (vertically flipped) into `data`.
* @params:
*   data: array of bytes receiving one RGB24 frame.
*
* @return: returns true in success, false otherwise (EOF or conversion error).
*
**/
bool CVVideo::get_frame_videofile(CVubyte* data) {
  int frameFinished;
  AVPacket packet;

  // Keep reading packets until one complete video frame is decoded.
  // BUG FIX: the original processed a single packet per call, which
  // (a) leaked packets from non-video streams (av_free_packet was only
  // reached for video packets) and (b) returned true without writing
  // anything into `data` when the packet did not complete a frame.
  while(av_read_frame(format_ctx, &packet) >= 0) {

    // Skip and release audio/subtitle packets
    if(packet.stream_index != video_stream) {
      av_free_packet(&packet);
      continue;
    }

    // Decode video frame
    avcodec_decode_video(codec_ctx, frame, &frameFinished, packet.data, packet.size);
    av_free_packet(&packet); // the decoder has consumed the packet data

    if(!frameFinished) continue; // need more packets to finish this frame

    // Lazily create the native->RGB24 conversion context (reused across
    // calls). NOTE(review): `static` means it is shared by every CVVideo
    // instance -- confirm only one video file is decoded at a time.
    static struct SwsContext *img_convert_ctx;
    if(img_convert_ctx == NULL) {
      int w = codec_ctx->width;
      int h = codec_ctx->height;
      img_convert_ctx = sws_getContext(w, h, codec_ctx->pix_fmt, w, h,
                                       PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);

      if(img_convert_ctx == NULL) {
        fprintf(stderr, "Cannot initialize the conversion context!\n");
        errorcode = CV_ERROR_CONVERT_FRAME_TO_RGB;
        return false;
      }
    }

    // Applies the conversion to RGB24
    sws_scale(img_convert_ctx, frame->data, frame->linesize, 0,
              codec_ctx->height, frameRGB->data, frameRGB->linesize);

    // Copy the frame to data, flipping it vertically (bottom row first)
    for(int y = 0; y < this->height; ++y)
      memcpy((data + (this->height-1 - y)*frameRGB->linesize[0]), frameRGB->data[0]+y*frameRGB->linesize[0], codec_ctx->width*3);

    return true;
  }

  // No more packets in the file
  errorcode = CV_ERROR_EOF;
  return false;
}



/* Return the bpp based on the pixel format
*  @param:
*    pxlfmt: V4L2 pixel format fourcc.
*
*  @return: the bits-per-pixel value used by this class for the format,
*           or -1 when the format is not recognized.
*
*  NOTE(review): per the V4L2 specification YUYV/UYVY are 16 bits per pixel
*  and Y41P/YUV420/YVU420 are 12; they are grouped under 24 here -- confirm
*  whether bpp is meant to describe the converted (RGB24) data rather than
*  the raw device data.
*/
int CVVideo::getBpp(CVuint32 pxlfmt) {
  int bpp = -1;

  switch(pxlfmt) {
    // 8bpp formats
    case V4L2_PIX_FMT_PAL8:
    case V4L2_PIX_FMT_RGB332:
    case V4L2_PIX_FMT_SBGGR8:
    case V4L2_PIX_FMT_GREY:     bpp = 8;  break;

    // 16bpp formats
    case V4L2_PIX_FMT_RGB444:
    case V4L2_PIX_FMT_RGB555:
    case V4L2_PIX_FMT_RGB565:
    case V4L2_PIX_FMT_RGB555X:
    case V4L2_PIX_FMT_RGB565X:
    case V4L2_PIX_FMT_SBGGR16:
    case V4L2_PIX_FMT_YUV444:
    case V4L2_PIX_FMT_YUV555:
    case V4L2_PIX_FMT_YUV565:
    case V4L2_PIX_FMT_Y16:      bpp = 16; break;

    // 24bpp formats (see NOTE above for the YUV entries)
    case V4L2_PIX_FMT_BGR24:
    case V4L2_PIX_FMT_RGB24:
    case V4L2_PIX_FMT_YUYV:
    case V4L2_PIX_FMT_UYVY:
    case V4L2_PIX_FMT_Y41P:
    case V4L2_PIX_FMT_YVU420:
    case V4L2_PIX_FMT_YUV420:   bpp = 24; break;

    // 32bpp formats
    case V4L2_PIX_FMT_BGR32:
    case V4L2_PIX_FMT_RGB32:
    case V4L2_PIX_FMT_YUV32:    bpp = 32; break;

    default:                    break; // unknown format: keep -1
  }

  return bpp;
}



/** Open an ieee1394 camera and setup initial parameters
*  @param:
*    params: initial parameters (like framerate, width and height, etc.)
*
*  @return: true if the camera was successfully opened, false otherwise.
**/
bool CVVideo::openIEEE1394(CVieee1394params_t params) {
  dc1394error_t err;
  dc1394camera_list_t *list; // List of all available cameras

  // Initialize a context in which libdc1394 can be used
  dc1394 = dc1394_new();
  if(!dc1394) {
    errorcode = CV_ERROR_CAMERA_NOT_OPENED;
    return false;
  }

  // Returns the list of cameras available on the computer
  err = dc1394_camera_enumerate(dc1394, &list);
  if(err != DC1394_SUCCESS) {
    errorcode = CV_ERROR_CAMERA_NOT_OPENED;
    return false;
  }

  // If num == 0 or camera_id is invalid returns an error.
  // BUG FIX: the list was leaked on this path in the original.
  if( (list->num == 0) || (params.camera_id < 0) || (params.camera_id >= list->num)) {
    errorcode = CV_ERROR_CAMERA_NOT_OPENED;
    dc1394_camera_free_list(list);
    return false;
  }

  // Choose a camera to work with; the list is no longer needed afterwards
  camera = dc1394_camera_new(dc1394, list->ids[params.camera_id].guid);
  dc1394_camera_free_list(list);
  if (!camera) {
    errorcode = CV_ERROR_CAMERA_NOT_OPENED;
    return false;
  }

  // Setup capture (allocate the DMA ring buffers)
  err = dc1394_capture_setup(camera, CV_DMA_BUFFERS, DC1394_CAPTURE_FLAGS_DEFAULT);
  if(err != DC1394_SUCCESS) {
    errorcode = CV_ERROR_CAMERA_NOT_OPENED; // original left a bare //ERROR; placeholder
    return false;
  }


  // Set parameters here
  dc1394video_mode_t curr_video_mode;
  err = dc1394_video_get_mode(camera, &curr_video_mode);

  // If video mode is scalable (Format7), apply the requested geometry
  if(curr_video_mode >= DC1394_VIDEO_MODE_FORMAT7_0) {
    dc1394color_coding_t curr_color_coding;

    err = dc1394_format7_set_image_size(camera, curr_video_mode, params.width, params.height);
    err = dc1394_video_set_framerate(camera, params.fps);
    err = dc1394_format7_get_color_coding(camera, curr_video_mode, &curr_color_coding);
  }


  // Starts the isochronous data transmission.
  err = dc1394_video_set_transmission(camera, DC1394_ON);
  if(err != DC1394_SUCCESS) {
    errorcode = CV_ERROR_CAMERA_NOT_OPENED; // original left a bare //ERROR; placeholder
    return false;
  }

  // Install the frame grabber and record the input type.
  // BUG FIX: the original set neither, so getFrame() dispatched through an
  // uninitialized member pointer and ~CVVideo() skipped the 1394 cleanup.
  this->get_frame_ptr = &CVVideo::get_frame_ieee1394;
  this->input_type = CV_INPUT_IEEE1394;

  input_opened = true;
  return true;

}





/** Gets the current frame from an ieee1394 camera.
*  @params:
*    data: destination array of bytes; must hold one full frame.
*
*  @return: true in success, false otherwise.
**/
bool CVVideo::get_frame_ieee1394(CVubyte* data) {
  dc1394error_t err;

  // Captures a video frame. The returned struct contains the image buffer,
  // among others. This image buffer SHALL NOT be freed, as it represents an area
  // in the memory that belongs to the system. 
  err = dc1394_capture_dequeue(camera, DC1394_CAPTURE_POLICY_WAIT, &frame_1394);
  if(err != DC1394_SUCCESS) {
    errorcode = CV_ERROR_NO_SIGNAL; // original left a bare //ERROR; placeholder
    return false;
  }

  // BUG FIX: copy the image into the caller's buffer BEFORE handing the DMA
  // buffer back to the driver -- the original enqueued it without ever
  // copying, so `data` was never filled.
  memcpy(data, frame_1394->image, frame_1394->image_bytes);

  // Enqueue new frame
  dc1394_capture_enqueue(camera, frame_1394);

  return true;

}




















/** Converts the YUYV (YUV 4:2:2 packed) device buffer into 24bpp RGB.
* Rows are written bottom-up into `data` (vertical flip), each output row
* padded to a 32-bit boundary (see ROWSIZE).
*
* NOTE(review): the loops run from height-1 down to 1, so the first
* source/destination row is skipped -- same structure as YUYV_to_GREY24;
* confirm whether the off-by-one is intentional.
*
* @params:
*   data: destination array of bytes (RGB24).
*
* @return: returns true (the conversion itself cannot fail).
**/
bool CVVideo::YUYV_to_RGB24(CVubyte* data) {
  int rsize = ROWSIZE(this->width, 24);
  int k, h = 0;
  for(int i = this->height; --i;) {
    k = 0; ++h;
    for(int j = 0; j < this->width; j += 2) {
      int l = 3*j;
      // One YUYV quadruple encodes two horizontally adjacent pixels
      CVubyte y1 = buffer[h*rowsize + k++];
      CVubyte u  = buffer[h*rowsize + k++];
      CVubyte y2 = buffer[h*rowsize + k++];
      CVubyte v  = buffer[h*rowsize + k++];
      
      // YCbCr -> RGB conversion, clamped to [0,255]
      data[i*rsize + l]   = (CVubyte)BYTERANGE( 1.164f*(float)(y1 - 16.f) + 1.596f*(float)(v - 128.f) );
      data[i*rsize + l+1] = (CVubyte)BYTERANGE( 1.164f*(float)(y1 - 16.f) - 0.813f*(float)(v - 128.f) - 0.391f*(float)(u - 128.f) );
      data[i*rsize + l+2] = (CVubyte)BYTERANGE( 1.164f*(float)(y1 - 16.f) + 2.018f*(float)(u - 128.f) );
      
      data[i*rsize + l+3] = (CVubyte)BYTERANGE( 1.164f*(float)(y2 - 16.f) + 1.596f*(float)(v - 128.f) );
      data[i*rsize + l+4] = (CVubyte)BYTERANGE( 1.164f*(float)(y2 - 16.f) - 0.813f*(float)(v - 128.f) - 0.391f*(float)(u - 128.f) );
      data[i*rsize + l+5] = (CVubyte)BYTERANGE( 1.164f*(float)(y2 - 16.f) + 2.018f*(float)(u - 128.f) );

    }
  }

  // BUG FIX: the function is declared bool but the original fell off the
  // end without returning a value (undefined behavior).
  return true;
}



/** Converts the YUYV (YUV 4:2:2 packed) device buffer into a 24bpp grey
* image by replicating each pixel's luma (Y) byte into all three output
* channels. Rows are written bottom-up into `data` (vertical flip), each
* output row padded to a 32-bit boundary (see ROWSIZE).
*
* NOTE(review): the loops run from height-1 down to 1, so the first row is
* never converted -- confirm whether this off-by-one is intentional.
*
* @params:
*   data: destination array of bytes (grey replicated as RGB24).
*
* @return: always returns true.
**/
bool CVVideo::YUYV_to_GREY24(CVubyte* data) {
  int rsize = ROWSIZE(this->width, 24);
  int k, h = 0;
  for(int i = this->height; --i;) {
    k = 0; ++h;
    for(int j = 0; j < this->width; ++j) {
      // YUYV stores one luma byte per pixel at even offsets (Y0 U Y1 V ...)
      CVubyte tmp = buffer[h*rowsize + 2*(k++)];
      int l = j*3;
      data[i*rsize + l]   = tmp;
      data[i*rsize + l+1] = tmp;
      data[i*rsize + l+2] = tmp;
    }
  }
  
  return true;
  
}






