#include "getImage.h"
#include<QPixmap>
#include<QImageReader>
#include<QBuffer>


// Constructor: only establishes safe member defaults; the real setup
// (sockets, buffers, config) happens in init().
// NOTE(review): `parent` is accepted but not forwarded to the base class —
// confirm against the declaration in getImage.h whether it should be.
GetImage::GetImage(QWidget *parent)
{
    Q_UNUSED(parent);
    // BUG FIX: the destructor unconditionally runs `delete[] receivedData`,
    // but the buffer is only allocated in init(). Null-initialize here so
    // destroying a never-initialized object is well defined
    // (delete[] nullptr is a no-op; delete[] of an indeterminate pointer is UB).
    receivedData = nullptr;
    receivedDataSize = 0;
    udpSocket_recv = nullptr;
    udpSocket_send = nullptr;
//    dstip.setAddress("192.168.62.199");
}

// Destructor: releases the frame buffers owned by this object.
// The QUdpSocket members are parented to `this`, so Qt deletes them.
GetImage::~GetImage()
{
#if 1
    // UDP build: free the frame-reassembly buffer allocated in init().
    // NOTE(review): safe only if receivedData is null-initialized before
    // init() ever runs — confirm the member has an initializer in getImage.h.
    delete[] receivedData;
#else
free(yuvImage);
free(rgbData);
free(change_rgbData); // free the V4L2 conversion buffers
#endif
}

bool GetImage::init()
{
    if (start_flag)
    {
        #if 1
            udpSocket_recv = new QUdpSocket(this);    // 接收YOLOV5处理后的视频数据
            udpSocket_recv->bind(8888);               // 监听本机的8888端口
            udpSocket_send = new QUdpSocket(this);    // 发送目标主机的UDP
            receivedData = new char[640*384*3];  // 动态分配内存用于存放接收到的数据
            receivedDataSize = 0;  // 已接收数据的大小
             // 打开 JSON 文件
            QFile file("config.json");
            if (!file.open(QIODevice::ReadOnly | QIODevice::Text)) {
                qDebug() << "Failed to open JSON file.";
                return 1;
            }

            // 读取 JSON 内容
            QByteArray jsonData = file.readAll();
            file.close();
            // 解析 JSON 数据
            QJsonParseError error;
            QJsonDocument jsonDoc = QJsonDocument::fromJson(jsonData, &error);
            if (jsonDoc.isNull()) {
                qDebug() << "Failed to parse JSON file:" << error.errorString();
                return 1;
            }
            // 检查是否为对象
            if (!jsonDoc.isObject()) {
                qDebug() << "JSON document is not an object.";
                return 1;
            }

            // 获取根对象
            QJsonObject root = jsonDoc.object();
             // 从 JSON 对象中获取 "local" 键对应的值，这里是一个 JSON 数组
            QJsonArray localArray = root["dist"].toArray();
            // 遍历数组并输出每个元素的值
            qDebug() << "dist:";
            for(const QJsonValue& value : localArray) {
                qDebug() << value.toString();
            }
            dstip.setAddress(localArray[0].toString());
            dstport = localArray[1].toString().toInt();
     
        #else
            yuvImage = static_cast<char*>(malloc(IMAGEWIDTH * IMAGEHEIGHT * 3 / 2));
            memset(yuvImage, 0, IMAGEWIDTH * IMAGEHEIGHT * 3 / 2);
            rgbData = (uint8_t *)(malloc(IMAGEWIDTH * IMAGEHEIGHT * 3));
            memset(rgbData, 0, IMAGEWIDTH * IMAGEHEIGHT * 3);
            change_rgbData = (uint8_t*)malloc(outputWidth * outputHeight * 3);
            memset(change_rgbData, 0, outputWidth * outputHeight * 3);
            init_camera();
        #endif

      
        start_flag = 0;
    }
    return true;
}

// Tear-down counterpart of init(), then stops this thread's event loop.
void GetImage::disinit()
{
    #if 1
    // UDP build: sockets are parented to `this` and the receive buffer is
    // freed in the destructor, so there is nothing to release here.
    #else
    close_v4l2();
    #endif
    this->exit();   // presumably QThread::exit() — TODO confirm base class
}

// Thread body (presumably QThread::run — TODO confirm base class).
// UDP build (#if 1): launches the external YOLOv5 inference binary, which
// captures /dev/video0 itself and streams processed frames back over UDP;
// those are reassembled in receiveYolov5Image(). system() blocks until the
// child process exits, so this thread is effectively parked on it.
// V4L2 build (#else): captures frames directly, converts YUYV->RGB, scales,
// overlays a test rectangle, and emits each frame to the UI.
void GetImage::run()
{   
    system("sudo ./yolov5n_traffic_light hhb_out/shl.hhb.bm /dev/video0");
#if 1

// while (1)
// {
//     counter
//     // // 读取BMP图片文件
//     // QImage image("result.bmp");
//     // emit sendReceiveImageToWidget(image);    //发送到窗口显示
//     // sendYolov5ImageToTargetHost(image,packet_unit);     //发送给目标主机
//     // // 如果读取失败，输出错误信息并退出程序
//     // if (image.isNull()) {
//     //     qDebug() << "Error: Unable to load image.";
//     // }
// }


#else
    // Fixed test rectangle drawn on every frame.
    int rectX = 100;
    int rectY = 100;
    int rectWidth = 50;
    int rectHeight = 30;
    while (1)
    {
        /* block until the driver has a filled capture buffer */
        memset(fds, 0, sizeof(fds));
        fds[0].fd = fd;
        fds[0].events = POLLIN;
        if (1 == poll(fds, 1, -1))
        {
            /* dequeue a filled buffer */
            struct v4l2_buffer buf;
            memset(&buf, 0, sizeof(struct v4l2_buffer));
            buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            buf.memory = V4L2_MEMORY_MMAP;
            
            if (0 != ioctl(fd, VIDIOC_DQBUF, &buf))
            {
                perror("Unable to dequeue buffer");
            }
    
            uint8_t* rgb_image = NULL;
            rgb_image = rgbData;
         
            // usb_camera_file_save(yuvImage);
        
            // Convert the mmap'ed YUYV frame to packed RGB888.
            YUYVToRGB((const uint8_t*)(bufs[buf.index]), 640, 480, rgb_image);

            // Scale the RGB data to the output size (zero-padding out-of-range pixels).
            uint8_t* scaledRGBData = NULL;
            scaledRGBData = change_rgbData;
            ScaleAndPadRGB(rgb_image, 640,480, scaledRGBData, outputWidth, outputHeight);
      
            // Overlay the test rectangle on the RGB data.
            // NOTE(review): drawn on rgb_image (the 640x480 buffer) but with
            // outputWidth/outputHeight dimensions — confirm intended target.
            DrawRectangleOutline(rgb_image, outputWidth, outputHeight, rectX, rectY, rectWidth, rectHeight);
           
            // Flip the RGB data vertically (disabled).
            // FlipImageVertical(rgb_image, outputWidth, outputHeight);
            
            // Save the RGB data as a BMP file (debug aid, disabled).
            // SaveRGBToBMP("/mnt/hgfs/Share_file-2/licheePI_USB_Camera/output.bmp", rgbData, outputWidth, outputHeight);
            
            // Wrap rgb_image in a QImage and hand it to the UI.
            // NOTE(review): QImage does not copy the buffer here; safe only if
            // the slot consumes it before the next frame overwrites rgbData.
            QImage image(rgb_image, outputWidth, outputHeight, QImage::Format_RGB888);
            emit sendReceiveImageToWidget(image);
         
            // hook for further post-processing of the frame
            /* requeue the buffer for the driver */
            if (0 != ioctl(fd, VIDIOC_QBUF, &buf))
            {
                perror("Unable to queue buffer");
            }
        }
    }
  
    // Unreachable: the capture loop above never exits.
    if (0 != ioctl(fd, VIDIOC_STREAMOFF, &type))
    {
        perror("Unable to stop capture");
        // return -1;
    }
    printf("stop capture ok\n");
    close(fd);
#endif
}
# if 1
// Convert a packed 24-bit BGR buffer to a QImage, flipping it vertically
// (row 0 of the input becomes the bottom row of the output).
// Returns a self-contained (deep-copied) image.
QImage GetImage::convertBGRtoQImage(const uint8_t* bgrData, int width, int height) {
    // Scratch buffer for the swizzled/flipped pixels (3 bytes per pixel).
    uint8_t* rgbData = new uint8_t[width * height * 3];

    for (int y = 0; y < height; ++y) {
        for (int x = 0; x < width; ++x) {
            int bgrIndex = (y * width + x) * 3;
            // Mirror the row index to flip vertically.
            int rgbIndex = ((height - 1 - y) * width + x) * 3;

            // BGR -> RGB channel swap.
            rgbData[rgbIndex]     = bgrData[bgrIndex + 2];
            rgbData[rgbIndex + 1] = bgrData[bgrIndex + 1];
            rgbData[rgbIndex + 2] = bgrData[bgrIndex];
        }
    }

    // BUG FIX: QImage(uchar*, ...) wraps the caller's buffer without copying,
    // so the original code returned an image pointing at freed memory.
    // copy() detaches into QImage-owned storage before the delete[].
    // Also pass bytesPerLine explicitly: without it QImage assumes 32-bit
    // aligned scanlines, which misreads rows when width*3 % 4 != 0.
    QImage image = QImage(rgbData, width, height, width * 3,
                          QImage::Format_RGB888).copy();
    delete[] rgbData;
    return image;
}

// Convert a packed 24-bit BGR buffer to an RGB QImage (no flip).
// Returns a self-contained (deep-copied) image.
QImage GetImage::BGRToRGB(const uint8_t* bgr, int width, int height) {
    // BUG FIX: the original filled a temporary array, wrapped it in a QImage
    // (which does NOT copy), deleted the array, and returned the now-dangling
    // image. Wrapping the input and calling rgbSwapped() performs the same
    // per-pixel R<->B swap and returns a new, QImage-owned buffer.
    // bytesPerLine is passed explicitly because QImage otherwise assumes
    // 32-bit aligned scanlines.
    return QImage(bgr, width, height, width * 3,
                  QImage::Format_RGB888).rgbSwapped();
}

//接收经过yolov5处理后的视频流
void GetImage::receiveYolov5Image()
{
 
    qint64 bytesRead;

    QHostAddress adrr ;
    quint16 port;
   
    // image_data = receivedData;
    char  curindex[4];
   
    // 接收 UDP 数据报
    while (udpSocket_recv->hasPendingDatagrams()) {
        
        QByteArray datagram;
       
        datagram.resize(udpSocket_recv->pendingDatagramSize());
         // 读取收到的数据包
        bytesRead =  udpSocket_recv->readDatagram(datagram.data(), datagram.size(), &adrr, &port);
 
        QDataStream stream(datagram);
        // 从流中读取四个字节到char数组中
        stream.readRawData(curindex, 4);
        QString str_index(curindex);
        cur_index = str_index.toInt();
        if(packet_size * cur_index != receivedDataSize)
        {
            receivedDataSize = 0;
            // receivedData = receivedData - receivedDataSize;
            //  qDebug() << "error" <<  Qt::endl;
        }
        // stream.skipRawData(4);
      
        if (cur_index < 22)
        {
            stream.readRawData(receivedData + receivedDataSize, packet_size);
            receivedDataSize += packet_size;
        }
        else
        {
            stream.readRawData(receivedData + receivedDataSize, expectedTotalSize - receivedDataSize);
            receivedDataSize += (expectedTotalSize - receivedDataSize);
        }

        // qDebug() << "current index: " <<  cur_index << Qt::endl;
        // qDebug() << "Received data size:" << bytesRead << Qt::endl;
        if(cur_index == 22)
        {
            error_flag = 0;
            cur_index = 0;
            QImage image((uint8_t *)receivedData, 640, 384, QImage::Format_RGB888);

            // uint8_t red = receivedData[0];
            // uint8_t green = receivedData[1];
            // uint8_t blue = receivedData[2];
            //  qDebug() <<  "Red = " << (int)red << ", "<< "Green = " << (int)green << ", "<< "Blue = " << (int)blue ;
            // QImage image = BGRToRGB((uint8_t *)receivedData, 640, 384);
            emit sendReceiveImageToWidget(image);    //发送到窗口显示
            sendYolov5ImageToTargetHost(image,packet_unit);     //发送给目标主机
            receivedDataSize = 0;
            // if(counter == 1000)
            // {
            //     system("sudo killall yolov5n_traffic_light");
            //     int i = 1000000;
            //     while(i--)
            //     {
            //         int j = 900;
            //         while (j--);
            //     }
            //     system("sudo ./yolov5n_traffic_light hhb_out/shl.hhb.bm /dev/video0");
            //     counter=0;
            // }
            // counter++;
        }  
    }
}

void GetImage::sendYolov5ImageToTargetHost(const QImage &image, int packet_unit)   //分包传输
{
    // 将图像编码为 JPEG 格式
    QByteArray encoded_image;
    QBuffer buffer(&encoded_image);
    buffer.open(QIODevice::WriteOnly);
    image.save(&buffer, "JPEG", 50);

    /*1代表是图片数据、2代表是速度*/
    qint32 data_type = 1;         //发送的数据类别

    QByteArray image_data;      //图片的数据
    qint32 data_length;         //数据的长度

    // 获取图像数据
    data_length = encoded_image.size();
    image_data = encoded_image;

    qint32 current_packet_index = 0;

    //向上去整，防止少发
    qint32 packet_count = (data_length + packet_unit - 1) / packet_unit;

    for(int i = 0; i < packet_count; i++)
    {
        // 判断是否为最后一个数据包
        bool is_last_packet = (current_packet_index == packet_count - 1);

        // 构建数据包
        QByteArray packet = image_data.mid(current_packet_index * packet_unit, packet_unit);
        ++current_packet_index;

        // 添加包头信息
        QByteArray send_data;
        QDataStream stream(&send_data, QIODevice::WriteOnly);
        stream  << qint32(data_length) << qint32(current_packet_index);
        send_data.append(packet);

        // 发送数据包
        udpSocket_send->writeDatagram(send_data,dstip,dstport);
      
        //  qDebug() << "IPv4 Address: " << dstip.toString();
        //  qDebug() << "Port: " << dstport;
        // if (is_last_packet) {
        //     qDebug() << "Image sent successfully.";
        // }
    }
}

#else 
// Open and configure the V4L2 capture device (FILE_VIDEO):
// query capabilities, enumerate supported formats/frame sizes, set
// IMAGEWIDTH x IMAGEHEIGHT YUYV, request and mmap 8 buffers, queue them,
// and start streaming. Returns 0 on success, -1 on any failure.
int GetImage::init_camera()
{
    /* open the device node */
    fd = open(FILE_VIDEO, O_RDWR);
    if (fd < 0)
    {
        printf("can not open %s\n", FILE_VIDEO);
        return -1;
    }

    /* query capabilities */
    struct v4l2_capability cap;
    memset(&cap, 0, sizeof(struct v4l2_capability));
    
    if (0 == ioctl(fd, VIDIOC_QUERYCAP, &cap))
    {
        printf("driver:\t\t%s\n", cap.driver);
        printf("card:\t\t%s\n", cap.card);
        printf("bus_info:\t%s\n", cap.bus_info);
        printf("version:\t%d\n", cap.version);
        printf("capabilities:\t%x\n", cap.capabilities); 
        if((cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) == 0) {
            fprintf(stderr, "Error opening device %s: video capture not supported.\n",
                    FILE_VIDEO);
            return -1;
        }
        
        if(!(cap.capabilities & V4L2_CAP_STREAMING)) {
            fprintf(stderr, "%s does not support streaming i/o\n", FILE_VIDEO);
            return -1;
        }
    }
    else
    {
        printf("can not get capability\n");
        return -1;
    }

    /* enumerate every pixel format and its frame sizes (informational) */
    // NOTE(review): fmt_index/frame_index are members and are assumed to be
    // zero-initialized before this runs — confirm in getImage.h.
    printf("Support format:\n");
    while (1)
    {
        fmtdesc.index = fmtdesc.index = fmt_index;
        fmtdesc.type  = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        if (0 != ioctl(fd, VIDIOC_ENUM_FMT, &fmtdesc))
            break;
        frame_index = 0;
        while (1)
        {
            /* enumerate the frame sizes supported by this format */
            memset(&fsenum, 0, sizeof(struct v4l2_frmsizeenum));
            fsenum.pixel_format = fmtdesc.pixelformat;
            fsenum.index = frame_index;
            
            if (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &fsenum) == 0)
            {
                printf("format %s,%d, framesize %d: %d x %d\n", fmtdesc.description, fmtdesc.pixelformat, frame_index, fsenum.discrete.width, fsenum.discrete.height);
            }
            else
            {
                break;
            }

            frame_index++;
        }

        fmt_index++;
    }

    /* set the capture format */
    struct v4l2_format fmt;
    memset(&fmt, 0, sizeof(struct v4l2_format));
    fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    fmt.fmt.pix.width = IMAGEWIDTH;
    fmt.fmt.pix.height = IMAGEHEIGHT;
    // fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_MJPEG;
    fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
    // fmt.fmt.pix.field = V4L2_FIELD_ANY;
    fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
    if (0 == ioctl(fd, VIDIOC_S_FMT, &fmt))
    {
        printf("set format ok: %d x %d\n", fmt.fmt.pix.width, fmt.fmt.pix.height);
    }
    else
    {
        printf("can not set format\n");
        return -1;
    }

    /* request capture buffers and mmap each one */
    struct v4l2_requestbuffers rb;
    memset(&rb, 0, sizeof(struct v4l2_requestbuffers));
    rb.count = 8;
    rb.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    rb.memory = V4L2_MEMORY_MMAP;

    if (0 == ioctl(fd, VIDIOC_REQBUFS, &rb))
    {
        /* the driver may grant fewer buffers than requested */
        buf_cnt = rb.count;
        for(i = 0; i < rb.count; i++) {
            struct v4l2_buffer buf;
            memset(&buf, 0, sizeof(struct v4l2_buffer));
            buf.index = i;
            buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            buf.memory = V4L2_MEMORY_MMAP;
            if (0 == ioctl(fd, VIDIOC_QUERYBUF, &buf))
            {
                /* map the kernel buffer into our address space */
                bufs[i] = mmap(0 /* start anywhere */ ,
                                  buf.length, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
                                  buf.m.offset);
                if(bufs[i] == MAP_FAILED) {
                    perror("Unable to map buffer");
                    return -1;
                }
            }
            else
            {
                printf("can not query buffer\n");
                return -1;
            }            
        }

        printf("map %d buffers ok\n", buf_cnt);
        
    }
    else
    {
        printf("can not request buffers\n");
        return -1;
    }

    /* queue every buffer so the driver can start filling them */
    for(i = 0; i < buf_cnt; ++i) {
        struct v4l2_buffer buf;
        memset(&buf, 0, sizeof(struct v4l2_buffer));
        buf.index = i;
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        if (0 != ioctl(fd, VIDIOC_QBUF, &buf))
        {
            perror("Unable to queue buffer");
            return -1;
        }
    }
    printf("queue buffers ok\n");

    /* start streaming */
    if (0 != ioctl(fd, VIDIOC_STREAMON, &type))
    {
        perror("Unable to start capture");
        return -1;
    }
    printf("start capture ok\n");

    // BUG FIX: the function previously fell off the end of a non-void
    // function on the success path (undefined behavior).
    return 0;
}

// Stop the video stream and close the capture device.
// Returns 1 when a valid descriptor was closed, -1 otherwise.
int GetImage::close_v4l2(void)
{
    // Always request stream-off first (matches the original behavior even
    // when fd is invalid; the ioctl simply fails in that case).
    ioctl(fd, VIDIOC_STREAMOFF, &type);

    if (fd == -1)
        return -1;   // device was never opened

    close(fd);
    return 1;
}

// Capture loop: dequeue each filled V4L2 buffer, dump its raw contents to a
// numbered file, and requeue it. Loops forever; returns -1 on ioctl failure.
int GetImage::grap_image()
{
    while (1)
    {
        /* block until the driver has a filled buffer */
        memset(fds, 0, sizeof(fds));
        fds[0].fd = fd;
        fds[0].events = POLLIN;
        if (1 == poll(fds, 1, -1))
        {
            /* dequeue a filled buffer */
            struct v4l2_buffer buf;
            memset(&buf, 0, sizeof(struct v4l2_buffer));
            buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            buf.memory = V4L2_MEMORY_MMAP;
            
            if (0 != ioctl(fd, VIDIOC_DQBUF, &buf))
            {
                perror("Unable to dequeue buffer");
                return -1;
            }
            
            /* dump the buffer contents to a file */
            sprintf(filename, "video_raw_data_%04d.jpg", file_cnt++);
            int fd_file = open(filename, O_RDWR | O_CREAT, 0666);
            if (fd_file < 0)
            {
                printf("can not create file : %s\n", filename);
            }
            else
            {
                // BUG FIX: the original wrote to and closed fd_file even when
                // open() failed (fd_file < 0); only touch it when valid.
                printf("capture to %s\n", filename);
                write(fd_file, bufs[buf.index], buf.bytesused);
                close(fd_file);
            }

            /* requeue the buffer for the driver */
            if (0 != ioctl(fd, VIDIOC_QBUF, &buf))
            {
                perror("Unable to queue buffer");
                return -1;
            }
        }
    }

    // Unreachable shutdown path (kept for symmetry with the original).
    if (0 != ioctl(fd, VIDIOC_STREAMOFF, &type))
    {
        perror("Unable to stop capture");
        return -1;
    }
    printf("stop capture ok\n");
    close(fd);

    // BUG FIX: non-void function previously fell off the end (UB if reached).
    return 0;
}

// Repack an IMAGEWIDTH x IMAGEHEIGHT YUYV (YUV 4:2:2 interleaved) frame into
// planar YUV420 (I420): full-resolution Y plane followed by quarter-size U
// and V planes. Chroma is subsampled vertically by simply taking the chroma
// bytes of every other row; no averaging is performed.
void GetImage::yuyv2yuv420(char *inbuf,  char *outbuf)
{
    char *y = NULL;
    char *u = NULL;
    char *v = NULL;
    int u_c = 0;   // running count of U samples written (debug only)
    int v_c = 0;   // running count of V samples written (debug only)

    // Plane layout inside outbuf: [Y | U | V].
    y = outbuf;
    u = y + IMAGEWIDTH * IMAGEHEIGHT;
    v = u + IMAGEWIDTH * IMAGEHEIGHT / 4;
    bool swith = true;
    uint32_t i = 0, j = 0;

    // Y plane: every even byte of the YUYV stream is a luma sample.
    for (i = 0; i < (IMAGEWIDTH * IMAGEHEIGHT * 2); i += 2)
    {
        *y++ = inbuf[i];
        
    }
    // Chroma planes: walk every other row; within a row the odd bytes
    // alternate U, V, U, V..., and `swith` routes them to the matching plane.
    // (Each row holds an even number of chroma bytes, so `swith` is back to
    // `true` at the start of every processed row.)
    for (i = 0; i < IMAGEHEIGHT; i += 2)
    {
        for (j = 1; j < IMAGEWIDTH << 1; j += 2)
        {
            if (swith)
            {
                *u++ = ((uint8_t *)(inbuf + (i * (IMAGEWIDTH << 1))))[j];
                swith = false;
                u_c++;
            }
            else
            {
                *v++ = ((uint8_t *)(inbuf + (i * (IMAGEWIDTH << 1))))[j];
                swith = true;
                v_c++;
            }
        }
    }
}

// Convert a width x height YUYV (YUV 4:2:2) frame to packed RGB888.
// Each 4-byte YUYV group (Y1 U Y2 V) yields two RGB pixels that share the
// same chroma pair. `rgb` must hold width * height * 3 bytes.
void GetImage::YUYVToRGB(const uint8_t* yuyv, int width, int height, uint8_t* rgb) {
    // BUG FIX: the original passed the intermediate int results through
    // limitToRange(uint8_t), but the implicit int -> uint8_t conversion at
    // the call site already wrapped out-of-range values modulo 256
    // (e.g. 300 became 44 instead of clamping to 255). Clamp in int space.
    auto clamp8 = [](int v) -> uint8_t {
        if (v < 0)   return 0;
        if (v > 255) return 255;
        return static_cast<uint8_t>(v);
    };

    int size = width * height;
    for (int i = 0, j = 0; i < size * 2; i += 4, j += 6) {
        int Y1 = yuyv[i];
        int U  = yuyv[i + 1];
        int Y2 = yuyv[i + 2];
        int V  = yuyv[i + 3];

        // BT.601 YUV -> RGB for the first pixel...
        int R1 = Y1 + 1.402 * (V - 128);
        int G1 = Y1 - 0.344136 * (U - 128) - 0.714136 * (V - 128);
        int B1 = Y1 + 1.772 * (U - 128);

        // ...and the second pixel, sharing the same U/V pair.
        int R2 = Y2 + 1.402 * (V - 128);
        int G2 = Y2 - 0.344136 * (U - 128) - 0.714136 * (V - 128);
        int B2 = Y2 + 1.772 * (U - 128);

        // Store both pixels, clamped to [0, 255].
        rgb[j]     = clamp8(R1);
        rgb[j + 1] = clamp8(G1);
        rgb[j + 2] = clamp8(B1);
        rgb[j + 3] = clamp8(R2);
        rgb[j + 4] = clamp8(G2);
        rgb[j + 5] = clamp8(B2);
    }
}

// Persist one 640x480 YUV420 frame (640*480*3/2 bytes) to the file named by
// the DES_TO_SAVE / TO_SAVE_NAME macros. Returns 0 on success, -1 on failure.
int GetImage::usb_camera_file_save(char *image)
{
    // A stack buffer replaces the original malloc/free bookkeeping, and
    // snprintf bounds the write — the old sprintf into a 256-byte heap
    // buffer could overflow if the path macros ever grew.
    char file_name[256];
    snprintf(file_name, sizeof(file_name), "%s%s", DES_TO_SAVE, TO_SAVE_NAME);

    FILE *usb_fd = fopen(file_name, "w+");
    if (usb_fd == NULL)
    {
        printf("Error opening %s\n", file_name);
        return -1;
    }

    const size_t frame_bytes = 640 * 480 * 3 / 2;   // YUV420: 1.5 bytes/pixel
    size_t written = fwrite(image, 1, frame_bytes, usb_fd);
    fclose(usb_fd);

    if (written != frame_bytes)
    {
        printf("Error writing %s\n", file_name);
        return -1;
    }
    return 0;
}

// Convert a planar YUV420 (I420) frame — Y plane, then quarter-size U and V
// planes — to packed RGB888. `rgb` must hold width * height * 3 bytes.
void GetImage::YUV420ToRGB(const uint8_t* yuv, int width, int height, uint8_t* rgb) {
    // BUG FIX: same defect as YUYVToRGB — routing the int results through
    // limitToRange(uint8_t) wrapped out-of-range values modulo 256 before any
    // clamping could happen. Clamp in int space instead.
    auto clamp8 = [](int v) -> uint8_t {
        if (v < 0)   return 0;
        if (v > 255) return 255;
        return static_cast<uint8_t>(v);
    };

    int size = width * height;
    const uint8_t* y = yuv;                      // full-resolution luma
    const uint8_t* u = yuv + size;               // quarter-size chroma planes
    const uint8_t* v = yuv + size + size/4;

    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            int index = i * width + j;
            int yIndex = i * width + j;
            // Each 2x2 luma block shares one U and one V sample.
            int uvIndex = (i/2) * (width/2) + (j/2);

            int Y = y[yIndex];
            int U = u[uvIndex];
            int V = v[uvIndex];

            // BT.601 YUV -> RGB.
            int R = Y + 1.402 * (V - 128);
            int G = Y - 0.344136 * (U - 128) - 0.714136 * (V - 128);
            int B = Y + 1.772 * (U - 128);

            int rgbIndex = index * 3;
            rgb[rgbIndex]     = clamp8(R);
            rgb[rgbIndex + 1] = clamp8(G);
            rgb[rgbIndex + 2] = clamp8(B);
        }
    }
}

// NOTE(review): this clamp is a no-op as written — `value` is uint8_t, so
// `value < 0` and `value > 255` are always false. Worse, callers pass int
// results that are wrapped modulo 256 by the implicit int -> uint8_t
// conversion at the call site (e.g. 300 arrives here as 44), so out-of-range
// colors are corrupted rather than clamped. The parameter should be `int`
// (the declaration in getImage.h must change in lockstep) — TODO confirm
// and fix there.
uint8_t GetImage::limitToRange(uint8_t value) {
    if (value < 0)
        return 0;
    else if (value > 255)
        return 255;
    else
        return value;
}

// Save a packed RGB888 buffer as a 24-bit BMP file (debug helper).
// NOTE(review): rows are written without 4-byte padding, so the output is
// only valid when width * 3 is a multiple of 4 (true for 640); rows are also
// written top-down while a positive biHeight means bottom-up, so viewers show
// the image vertically flipped — confirm whether callers pre-flip the data.
void GetImage::SaveRGBToBMP(const std::string& filename, const uint8_t* data, int width, int height) {
    std::ofstream file(filename, std::ios::out | std::ios::binary);

    // 52 header bytes as 13 little-endian ints, following the 2-byte "BM"
    // magic (54 bytes total before pixel data).
    // BUG FIX: the file size belongs in the first dword after "BM"
    // (bfSize) — it was previously written into the reserved field, leaving
    // bfSize = 0 — and biPlanes must be 1 per the BMP spec (it was 0).
    int headers[13] = {
        width * height * 3 + 54,   // bfSize: total file size
        0,                         // bfReserved1 / bfReserved2
        54,                        // bfOffBits: offset to pixel data
        40,                        // biSize: BITMAPINFOHEADER size
        width,                     // biWidth
        height,                    // biHeight (positive => bottom-up rows)
        1 | (24 << 16),            // biPlanes = 1, biBitCount = 24
        0,                         // biCompression = BI_RGB
        width * height * 3,        // biSizeImage
        0,                         // biXPelsPerMeter
        0,                         // biYPelsPerMeter
        0,                         // biClrUsed
        0                          // biClrImportant
    };

    file.write("BM", 2);
    file.write((char*)&headers, 52);

    // BMP stores pixels in BGR order; swap on the way out.
    for (int i = 0; i < width * height; i++) {
        file.put(data[i * 3 + 2]);
        file.put(data[i * 3 + 1]);
        file.put(data[i * 3]);
    }
    std::cout << "done" << std::endl;
    file.close();
}

// Resize a packed RGB888 buffer to outputWidth x outputHeight using
// nearest-neighbor sampling.
// NOTE(review): despite the name ("scale proportionally and pad"), the two
// axes are scaled independently, so the aspect ratio is NOT preserved, and
// for positive dimensions the zero-padding branch below is unreachable
// (x * scaleX < inputWidth always holds). If letterboxing was intended —
// e.g. to match a model's expected input — this needs a common scale factor
// plus explicit borders; confirm against the consumer of change_rgbData.
void GetImage::ScaleAndPadRGB(const uint8_t* input, int inputWidth, int inputHeight, uint8_t* output, int outputWidth, int outputHeight) {
    float scaleX = static_cast<float>(inputWidth) / outputWidth;
    float scaleY = static_cast<float>(inputHeight) / outputHeight;

    for (int y = 0; y < outputHeight; ++y) {
        for (int x = 0; x < outputWidth; ++x) {
            // Nearest-neighbor source coordinate (truncation, not rounding).
            int inputX = static_cast<int>(x * scaleX);
            int inputY = static_cast<int>(y * scaleY);

            if (inputX >= inputWidth || inputY >= inputHeight) {
                // Defensive zero fill (see NOTE above: effectively dead code).
                output[(y * outputWidth + x) * 3] = 0;
                output[(y * outputWidth + x) * 3 + 1] = 0;
                output[(y * outputWidth + x) * 3 + 2] = 0;
            } else {
                output[(y * outputWidth + x) * 3] = input[(inputY * inputWidth + inputX) * 3];
                output[(y * outputWidth + x) * 3 + 1] = input[(inputY * inputWidth + inputX) * 3 + 1];
                output[(y * outputWidth + x) * 3 + 2] = input[(inputY * inputWidth + inputX) * 3 + 2];
            }
        }
    }
}
// Paint a filled rectangle onto a packed RGB888 buffer: a 1-pixel red
// border with a white interior. Pixels falling outside the image are
// silently skipped, so partially off-screen rectangles are safe.
void GetImage::DrawRectangle(uint8_t* data, int width, int height, int x, int y, int rectWidth, int rectHeight) {
    for (int row = 0; row < rectHeight; ++row) {
        const int py = y + row;
        if (py < 0 || py >= height)
            continue;   // row is clipped

        for (int col = 0; col < rectWidth; ++col) {
            const int px = x + col;
            if (px < 0 || px >= width)
                continue;   // column is clipped

            uint8_t* pixel = data + (py * width + px) * 3;
            const bool onBorder = (row == 0 || row == rectHeight - 1 ||
                                   col == 0 || col == rectWidth - 1);

            // Border: (255, 0, 0). Interior: (255, 255, 255).
            pixel[0] = 255;
            pixel[1] = onBorder ? 0 : 255;
            pixel[2] = onBorder ? 0 : 255;
        }
    }
}
// Draw a 1-pixel red rectangle outline onto a packed RGB888 buffer.
// Each edge is rendered independently and every pixel is bounds-checked,
// so rectangles partially outside the image are clipped safely.
void GetImage::DrawRectangleOutline(uint8_t* data, int width, int height, int x, int y, int rectWidth, int rectHeight) {
    // Write a single red pixel, skipping coordinates outside the image.
    auto plot = [&](int px, int py) {
        if (px < 0 || px >= width || py < 0 || py >= height)
            return;
        uint8_t* pixel = data + (py * width + px) * 3;
        pixel[0] = 255;   // R
        pixel[1] = 0;     // G
        pixel[2] = 0;     // B
    };

    // Horizontal edges.
    for (int i = 0; i < rectWidth; ++i) {
        plot(x + i, y);                    // top
        plot(x + i, y + rectHeight - 1);   // bottom
    }

    // Vertical edges (corners are overdrawn with the same color — harmless).
    for (int i = 0; i < rectHeight; ++i) {
        plot(x, y + i);                    // left
        plot(x + rectWidth - 1, y + i);    // right
    }
}

// Flip a packed RGB888 image vertically, in place, by exchanging rows that
// mirror each other around the horizontal center (the middle row of an
// odd-height image stays put).
void GetImage::FlipImageVertical(uint8_t* data, int width, int height) {
    const int rowBytes = width * 3;
    uint8_t* scratch = new uint8_t[rowBytes];

    // Walk two pointers toward each other, swapping whole rows via the
    // scratch buffer.
    uint8_t* top = data;
    uint8_t* bottom = data + (height - 1) * rowBytes;
    while (top < bottom) {
        memcpy(scratch, top, rowBytes);
        memcpy(top, bottom, rowBytes);
        memcpy(bottom, scratch, rowBytes);
        top += rowBytes;
        bottom -= rowBytes;
    }

    delete[] scratch;
}
#endif


// find . -type f -exec touch {} +
