//
// Created by steven on 2025/9/4.
//

#include "image_converto.h"

#include <algorithm>
#include <cassert>
#include <cctype>
#include <cmath>
#include <cstddef>
#include <cstring>
#include <fstream>
#include <iostream>
#include <numeric>

#include <opencv2/opencv.hpp>
#include <rivs/rivs_ext.h>
#include "rivs/rivs_runtime.h"

#include "rivsaten/rivsaten.h"

#include <krt/mmu.h>

extern "C" {
#include <libavutil/pixfmt.h>
}

namespace rivscv {

    // Helper: byte width of a single element of the given rivsaten dtype.
    // Unknown dtypes fall back to 1 byte (same behavior as before).
    static inline size_t aten_dtype_size(rivsatenDataType_t dtype) {
        // Cases are grouped by element width rather than listed individually.
        switch (dtype) {
            case RIVSATEN_DATA_U8:
            case RIVSATEN_DATA_I8:
                return 1;
            case RIVSATEN_DATA_I16:
            case RIVSATEN_DATA_U16:
            case RIVSATEN_DATA_FP16:
            case RIVSATEN_DATA_BF16:
                return 2;
            case RIVSATEN_DATA_I32:
            case RIVSATEN_DATA_U32:
            case RIVSATEN_DATA_FP32:
                return 4;
            case RIVSATEN_DATA_I64:
            case RIVSATEN_DATA_U64:
            case RIVSATEN_DATA_F64:
                return 8;
            default:
                return 1;
        }
    }

    // Map a rivsaten element type to the matching RivsInference data type.
    // Unknown values fall back to TIF_UINT8.
    static inline RivsInference::DataType aten_dtype_to_rivs_dtype(rivsatenDataType_t dtype) {
        using DT = RivsInference::DataType;
        switch (dtype) {
            case RIVSATEN_DATA_U8:   return DT::TIF_UINT8;
            case RIVSATEN_DATA_I8:   return DT::TIF_INT8;
            case RIVSATEN_DATA_I16:  return DT::TIF_INT16;
            case RIVSATEN_DATA_U16:  return DT::TIF_UINT16;
            case RIVSATEN_DATA_I32:  return DT::TIF_INT32;
            case RIVSATEN_DATA_U32:  return DT::TIF_UINT32;
            case RIVSATEN_DATA_FP16: return DT::TIF_FP16;
            case RIVSATEN_DATA_BF16: return DT::TIF_BF16;
            case RIVSATEN_DATA_FP32: return DT::TIF_FP32;
            case RIVSATEN_DATA_I64:  return DT::TIF_INT64;
            case RIVSATEN_DATA_U64:  return DT::TIF_UINT64;
            case RIVSATEN_DATA_F64:  return DT::TIF_FP64;
            default:                 return DT::TIF_UINT8;
        }
    }

    // Inverse mapping: RivsInference data type -> rivsaten element type.
    // Unknown values fall back to RIVSATEN_DATA_U8.
    static inline rivsatenDataType_t rivs_dtype_to_aten_dtype(RivsInference::DataType dtype) {
        using DT = RivsInference::DataType;
        switch (dtype) {
            case DT::TIF_UINT8:  return RIVSATEN_DATA_U8;
            case DT::TIF_INT8:   return RIVSATEN_DATA_I8;
            case DT::TIF_INT16:  return RIVSATEN_DATA_I16;
            case DT::TIF_UINT16: return RIVSATEN_DATA_U16;
            case DT::TIF_INT32:  return RIVSATEN_DATA_I32;
            case DT::TIF_UINT32: return RIVSATEN_DATA_U32;
            case DT::TIF_FP16:   return RIVSATEN_DATA_FP16;
            case DT::TIF_BF16:   return RIVSATEN_DATA_BF16;
            case DT::TIF_FP32:   return RIVSATEN_DATA_FP32;
            case DT::TIF_INT64:  return RIVSATEN_DATA_I64;
            case DT::TIF_UINT64: return RIVSATEN_DATA_U64;
            case DT::TIF_FP64:   return RIVSATEN_DATA_F64;
            default:             return RIVSATEN_DATA_U8;
        }
    }

    // Byte width of a RivsInference element type, routed through the aten mapping.
    static inline size_t rivs_dtype_size(RivsInference::DataType dtype) {
        const rivsatenDataType_t aten_dt = rivs_dtype_to_aten_dtype(dtype);
        return aten_dtype_size(aten_dt);
    }


    // Construct an empty tensor bound to `device_id`.
    // No device memory is allocated; m_shape stays empty until a shape is set.
    RivsImageTensor::RivsImageTensor(int device_id)
        : m_data(nullptr),
          m_data_size(0),
          m_data_type(RivsInference::DataType::TIF_FP32),
          m_own_data(false),
          m_device_id(device_id)
    {
    }

    // Construct a tensor of the given shape/dtype, allocating and zeroing the
    // backing device memory. The caller must already have `device_id` active
    // (checked via rivsGetDevice in debug builds). The tensor owns its memory.
    RivsImageTensor::RivsImageTensor(int device_id, const std::vector<int> &shape, const RivsInference::DataType data_type)
    {
        // Accumulate the element count in size_t: the previous int-typed
        // accumulate could overflow for large tensors.
        m_data_size = std::accumulate(shape.begin(), shape.end(),
                                      static_cast<size_t>(1), std::multiplies<size_t>());
        m_shape = shape;
        m_data_type = data_type;
        m_device_id = device_id;
        auto sizeInBytes = m_data_size * rivs_dtype_size(m_data_type);
        int current_device_id = -1;
        rivsGetDevice(&current_device_id);
        assert(device_id == current_device_id);
        (void)current_device_id; // silence unused warning under NDEBUG
        auto ret = rivsMalloc(&m_data, sizeInBytes);
        assert(rivsSuccess == ret);
        ret = rivsMemset(m_data, 0, sizeInBytes);
        assert(rivsSuccess == ret);
        (void)ret;
        m_own_data = true;
    }


    // Wrap an existing (already-allocated) device buffer without taking
    // ownership: the destructor will not free `data`.
    RivsImageTensor::RivsImageTensor(int device_id, void *data, const std::vector<int> &shape, const RivsInference::DataType data_type)
    {
        m_data = data;
        // Accumulate the element count in size_t to avoid int overflow for
        // large shapes (the previous version multiplied in int).
        m_data_size = std::accumulate(shape.begin(), shape.end(),
                                      static_cast<size_t>(1), std::multiplies<size_t>());
        m_shape = shape;
        m_data_type = data_type;
        m_own_data = false;
        m_device_id = device_id;
    }

    // Release the device buffer if (and only if) this tensor owns it.
    RivsImageTensor::~RivsImageTensor()
    {
        if (m_own_data && m_data) {
            // Call rivsFree outside of assert(): under NDEBUG the assert
            // expression is compiled out entirely, which previously skipped the
            // free and leaked the device allocation in release builds.
            const auto ret = rivsFree(m_data);
            assert(rivsSuccess == ret);
            (void)ret; // silence unused warning under NDEBUG
            m_data = nullptr;
        }
    }


    // Copy a device-resident image tensor back to the host, convert it to an
    // 8-bit grayscale/BGR(A) cv::Mat, and write it to `filepath` with
    // cv::imwrite (format chosen by file extension).
    //
    // Supported shapes: 3-D (assumed CHW) and 2-D (HW). Supported dtypes:
    // FP32 (auto-scaled to [0,255] based on the observed value range) and
    // UINT8 (written as-is). Multi-channel tensors are assumed to be RGB(A)
    // order and converted to BGR(A) for OpenCV. All failures are reported on
    // stderr and the function returns without writing.
    void image_tensor_save(RivsImageTensor *tensor,const std::string& filepath)
    {
        // Basic validation
        size_t elemSize = rivs_dtype_size(tensor->m_data_type);
        size_t sizeInBytes = tensor->m_data_size * elemSize;
        if (tensor->m_data == nullptr || sizeInBytes == 0) {
            std::cerr << "SaveToFile: empty tensor or null device pointer" << std::endl;
            return;
        }
        if (tensor->m_shape.empty()) {
            std::cerr << "SaveToFile: empty shape" << std::endl;
            return;
        }

        // Determine dims and layout
        int C = 1, H = 0, W = 0;
        bool isCHW = false;
        if (tensor->m_shape.size() == 3) {
            // Assume CHW
            C = tensor->m_shape[0]; H = tensor->m_shape[1]; W = tensor->m_shape[2];
            isCHW = true;
        } else if (tensor->m_shape.size() == 2) {
            // Assume HW
            C = 1; H = tensor->m_shape[0]; W = tensor->m_shape[1];
            isCHW = false; // but will treat as single channel
        } else {
            std::cerr << "SaveToFile: unsupported shape size=" << tensor->m_shape.size() << std::endl;
            return;
        }
        if (H <= 0 || W <= 0 || C <= 0) {
            std::cerr << "SaveToFile: invalid shape CxHxW=" << C << "x" << H << "x" << W << std::endl;
            return;
        }

        // Copy device -> host with appropriate type
        cv::Mat image8u; // final image to write

        if (tensor->m_data_type == RivsInference::DataType::TIF_FP32) {
            // Pull as float32
            std::vector<float> host(tensor->m_data_size);
            if (rivsMemcpy(host.data(), tensor->m_data, sizeInBytes, rivsMemcpyDeviceToHost) != rivsSuccess) {
                std::cerr << "SaveToFile: D2H failed for FP32" << std::endl;
                return;
            }

            // Build HWC float image from CHW or HW
            if (C == 1) {
                // `fimg` is a non-owning view over `host`; convertTo below copies
                // the data into `tmp`, so the view's lifetime is fine.
                cv::Mat fimg(H, W, CV_32FC1, host.data());
                // Determine scale: assume [0,1] -> scale 255, else clamp to [0,255]
                double minv, maxv; cv::minMaxLoc(fimg, &minv, &maxv);
                double alpha = 1.0, beta = 0.0;
                if (maxv <= 1.0 + 1e-6 && minv >= 0.0) { alpha = 255.0; }
                else if (maxv <= 255.0 && minv >= 0.0) { alpha = 1.0; }
                else if (maxv > minv) { alpha = 255.0 / (maxv - minv); beta = -minv * alpha; }
                cv::Mat tmp; fimg.convertTo(tmp, CV_8UC1, alpha, beta);
                image8u = tmp; // grayscale
            } else if (C == 3 || C == 4) {
                // CHW -> vector of channels
                // Each plane is a non-owning view into `host`; cv::merge copies
                // the pixels out, so the views may die with `host`.
                size_t planeSize = static_cast<size_t>(H) * static_cast<size_t>(W);
                std::vector<cv::Mat> planes;
                planes.reserve(C);
                for (int c = 0; c < C; ++c) {
                    float* p = host.data() + c * planeSize;
                    planes.emplace_back(H, W, CV_32FC1, p);
                }
                cv::Mat fimg;
                cv::merge(planes, fimg); // HxWxC, float
                // Compute global min/max across channels
                double minv = 0.0, maxv = 0.0;
                for (int c = 0; c < C; ++c) {
                    double cmin, cmax; cv::minMaxLoc(planes[c], &cmin, &cmax);
                    if (c == 0 || cmin < minv) minv = cmin;
                    if (c == 0 || cmax > maxv) maxv = cmax;
                }
                // Same heuristic as the single-channel path: [0,1] data is scaled
                // by 255, [0,255] data is kept, anything else is min-max mapped.
                double alpha = 1.0, beta = 0.0;
                if (maxv <= 1.0 + 1e-6 && minv >= 0.0) { alpha = 255.0; }
                else if (maxv <= 255.0 && minv >= 0.0) { alpha = 1.0; }
                else if (maxv > minv) { alpha = 255.0 / (maxv - minv); beta = -minv * alpha; }
                cv::Mat tmp8u; fimg.convertTo(tmp8u, (C == 3) ? CV_8UC3 : CV_8UC4, alpha, beta);
                // Assume tensor is RGB(A); convert to BGR(A) for imwrite
                if (C == 3) {
                    cv::cvtColor(tmp8u, image8u, cv::COLOR_RGB2BGR);
                } else {
                    cv::cvtColor(tmp8u, image8u, cv::COLOR_RGBA2BGRA);
                }
            } else {
                std::cerr << "SaveToFile: unsupported channels=" << C << " for FP32" << std::endl;
                return;
            }
        } else if (tensor->m_data_type == RivsInference::DataType::TIF_UINT8) {
            // Pull as uint8
            std::vector<uint8_t> host(tensor->m_data_size);
            if (rivsMemcpy(host.data(), tensor->m_data, sizeInBytes, rivsMemcpyDeviceToHost) != rivsSuccess) {
                std::cerr << "SaveToFile: D2H failed for UINT8" << std::endl;
                return;
            }
            if (C == 1) {
                // clone() detaches from `host` before it goes out of scope.
                image8u = cv::Mat(H, W, CV_8UC1, host.data()).clone();
            } else if (C == 3 || C == 4) {
                size_t planeSize = static_cast<size_t>(H) * static_cast<size_t>(W);
                std::vector<cv::Mat> planes;
                planes.reserve(C);
                for (int c = 0; c < C; ++c) {
                    uint8_t* p = host.data() + c * planeSize;
                    planes.emplace_back(H, W, CV_8UC1, p);
                }
                cv::Mat rgbOrRgba; cv::merge(planes, rgbOrRgba);
                if (C == 3) {
                    cv::cvtColor(rgbOrRgba, image8u, cv::COLOR_RGB2BGR);
                } else {
                    cv::cvtColor(rgbOrRgba, image8u, cv::COLOR_RGBA2BGRA);
                }
            } else {
                std::cerr << "SaveToFile: unsupported channels=" << C << " for UINT8" << std::endl;
                return;
            }
        } else {
            // Fallback: convert to 8U via raw bytes and attempt single-channel
            // NOTE(review): only the first H*W bytes of the raw buffer are
            // interpreted as pixels; for multi-byte dtypes this is a best-effort
            // debug dump, not a faithful conversion.
            std::vector<unsigned char> host(sizeInBytes);
            if (rivsMemcpy(host.data(), tensor->m_data, sizeInBytes, rivsMemcpyDeviceToHost) != rivsSuccess) {
                std::cerr << "SaveToFile: D2H failed for unsupported dtype" << std::endl;
                return;
            }
            if (C == 1) {
                image8u = cv::Mat(H, W, CV_8UC1, host.data()).clone();
            } else {
                std::cerr << "SaveToFile: unsupported dtype with C=" << C << std::endl;
                return;
            }
        }

        // Write by extension via OpenCV
        if (image8u.empty()) {
            std::cerr << "SaveToFile: empty image after conversion" << std::endl;
            return;
        }
        std::vector<int> params;
        // Optional: set JPEG/PNG default qualities
        // NOTE(review): rfind(".jpg") != npos matches the pattern anywhere in
        // the path, not only as a suffix — confirm paths never contain these
        // substrings mid-name.
        if (filepath.size() >= 4) {
            std::string lower;
            lower.resize(filepath.size());
            std::transform(filepath.begin(), filepath.end(), lower.begin(), [](unsigned char c){ return (char)std::tolower(c); });
            if (lower.rfind(".jpg") != std::string::npos || lower.rfind(".jpeg") != std::string::npos) {
                params = {cv::IMWRITE_JPEG_QUALITY, 95};
            } else if (lower.rfind(".png") != std::string::npos) {
                params = {cv::IMWRITE_PNG_COMPRESSION, 3};
            }
        }
        if (!cv::imwrite(filepath, image8u, params)) {
            std::cerr << "SaveToFile: failed to write image: " << filepath << std::endl;
        }
    }

    // RAII holder for a device allocation plus the rivsatenTensor describing it.
    // Non-copyable: copying would alias mem_ptr and double-free it on destruction.
    struct TensorHolder {
        void* mem_ptr;
        rivsatenTensor tensor;
        std::vector<int64_t> shape;
        std::vector<int64_t> strides;
        rivsatenDataType_t dtype;
        int device_id{0};

        // Allocates shape-many elements of `in_dtype` on the current device
        // (debug-asserted to match `device_id`) and builds the tensor view.
        TensorHolder(int device_id, const std::vector<int64_t>& in_shape,
                     const std::vector<int64_t>& in_strides,
                     rivsatenDataType_t in_dtype)
            : mem_ptr(nullptr), shape(in_shape), strides(in_strides), dtype(in_dtype),device_id(device_id){
            size_t elements = 1;
            for (auto v : shape) { elements *= static_cast<size_t>(v); }
            size_t bytes = elements * aten_dtype_size(dtype);
            int current_device_id = -1;
            rivsGetDevice(&current_device_id);
            assert(device_id == current_device_id);
            (void)current_device_id;
            auto ret = rivsMalloc(&mem_ptr, bytes);
            assert(rivsSuccess == ret);
            (void)ret;
            // The tensor references the shape/strides vectors owned by this
            // holder, so the views stay valid for the holder's lifetime.
            tensor = rivsatenTensor(
                rivsatenSize_t(shape.data(), shape.size()),
                rivsatenSize_t(strides.data(), strides.size()),
                dtype,
                mem_ptr
            );
        }

        TensorHolder(const TensorHolder&) = delete;
        TensorHolder& operator=(const TensorHolder&) = delete;

        ~TensorHolder() {
            if (mem_ptr) {
                // Free outside assert(): under NDEBUG the assert expression is
                // compiled out, which previously leaked the device allocation.
                const auto ret = rivsFree(mem_ptr);
                assert(rivsSuccess == ret);
                (void)ret;
                mem_ptr = nullptr;
            }
        }
    };

    // Device-side conversion pipeline:
    //   1. crop the ROI out of `input`      (HWC, U8)
    //   2. transpose HWC -> CHW
    //   3. optionally flip the channel dim  (RGB <-> BGR)
    //   4. normalise to FP32 (effectively multiply by m_scaleAlpha when enabled)
    //   5. copy the result into `output`    (CHW, FP32)
    // Intermediate buffers are owned by TensorHolder and freed automatically.
    // NOTE(review): the ROI offset (SetOffset) is commented out, so the crop is
    // anchored at (0,0) — confirm m_srcROI.x/y are always 0 on this path.
    static void image_converto_internal(int device_id, rivsatenTensor &output,
                           rivsatenTensor &input,
                           const ConvertParam &param)
    {
        int roi_width = param.m_srcROI.width;
        int roi_height = param.m_srcROI.height;
        int channel = 3;

        // Stage buffers for each pipeline step.
        TensorHolder roi_holder(device_id,
            /*shape*/   {roi_height, roi_width, channel},
            /*strides*/ {roi_width * channel, channel, 1},
            /*dtype*/   RIVSATEN_DATA_U8
        );
        auto &roi_tensor = roi_holder.tensor;

        TensorHolder trans_holder(device_id,
            /*shape*/   {channel, roi_height, roi_width},
            /*strides*/ {1, roi_width * channel, channel},
            /*dtype*/   RIVSATEN_DATA_U8
        );
        auto &trans_result = trans_holder.tensor;

        TensorHolder flip_holder(device_id,
            /*shape*/   {channel, roi_height, roi_width},
            /*strides*/ {1, roi_width * channel, channel},
            /*dtype*/   RIVSATEN_DATA_U8
        );
        auto &flip_result = flip_holder.tensor;

        TensorHolder norm_holder(device_id,
            /*shape*/   {channel, roi_height, roi_width},
            /*strides*/ {roi_height * roi_width, roi_width, 1},
            /*dtype*/   RIVSATEN_DATA_FP32
        );
        auto &norm_result = norm_holder.tensor;

        // 1. Extract the ROI from `input` by re-describing its shape/strides so
        //    a strided copy pulls out the sub-image (row stride taken from the
        //    full input row, column stride stays packed).
        rivsatenSize_t input_shape = input.GetTensorShape();
        std::vector<int64_t> resize_input_shape = {roi_height, roi_width, channel};
        std::vector<int64_t> resize_input_strides = {input_shape.data[1] * input_shape.data[2], channel, 1};
        rivsatenSize_t new_input_shape(resize_input_shape.data(), resize_input_shape.size());
        rivsatenSize_t new_input_strides(resize_input_strides.data(), resize_input_strides.size());
        input.SetTensorShape(new_input_shape);
        input.SetTensorStrides(new_input_strides);
        //input.SetOffset(input_width * param.m_srcROI.y + param.m_srcROI.x * input_width * channel);

        auto ret = rivsaten::rivsatenCopy(roi_tensor, input, false);
        assert(RIVSATEN_STATUS_SUCCESS == ret);

        // 2. HWC -> CHW: re-describe the ROI tensor as CHW with HWC strides,
        //    then copy into the densely laid out trans_result.
        std::vector<int64_t> resize_roi_shape = {channel, roi_height, roi_width};
        std::vector<int64_t> resize_roi_strides = {1, roi_width * channel, channel};
        rivsatenSize_t new_roi_shape(resize_roi_shape.data(), resize_roi_shape.size());
        rivsatenSize_t new_roi_strides(resize_roi_strides.data(), resize_roi_strides.size());
        roi_tensor.SetTensorShape(new_roi_shape);
        roi_tensor.SetTensorStrides(new_roi_strides);

        ret = rivsaten::rivsatenCopy(trans_result, roi_tensor, false);
        assert(RIVSATEN_STATUS_SUCCESS == ret);

        // 3. Reverse the channel dimension when an RGB<->BGR swap is requested.
        int64_t dim = 0; // channel (C) dimension
        rivsatenSize_t flip_dim(&dim, 1);
        if (param.m_swapRGB) {
            ret = rivsaten::rivsatenFlip(flip_result, trans_result, flip_dim);
        } else {
            ret = rivsaten::rivsatenCopy(flip_result, trans_result, false);
        }
        assert(RIVSATEN_STATUS_SUCCESS == ret);

        // 4. Normalisation: dividing by 1/m_scaleAlpha multiplies by m_scaleAlpha.
        //    BUGFIX: norm_result must be populated even when scaling is disabled
        //    (step 5 copies it into `output`); previously the disabled path left
        //    norm_result uninitialized and copied garbage. Dividing by 1.0
        //    performs the U8 -> FP32 conversion without changing values.
        rivsatenScalar_t alpha;
        alpha.dtype = RIVSATEN_DATA_FP32;
        alpha.fval = param.m_scaleEnabled ? (1.0f / param.m_scaleAlpha) : 1.0f;
        ret = rivsaten::rivsatenDiv(norm_result, flip_result, alpha);
        assert(RIVSATEN_STATUS_SUCCESS == ret);

        // 5. Copy into `output`, temporarily re-describing it as a dense CHW
        //    tensor of the ROI size.
        rivsatenSize_t output_shape = output.GetTensorShape();
        rivsatenSize_t output_strides = output.GetTensorStrides();
        std::vector<int64_t> resize_output_shape = {channel, roi_height, roi_width};
        std::vector<int64_t> resize_output_strides = {output_shape.data[1] * output_shape.data[2], output_shape.data[2], 1};
        rivsatenSize_t new_output_shape(resize_output_shape.data(), resize_output_shape.size());
        rivsatenSize_t new_output_strides(resize_output_strides.data(), resize_output_strides.size());
        output.SetTensorShape(new_output_shape);
        output.SetTensorStrides(new_output_strides);

        ret = rivsaten::rivsatenCopy(output, norm_result, false);
        assert(RIVSATEN_STATUS_SUCCESS == ret);

        // Restore the caller-visible shape/strides of `output`.
        // (Stage buffers are freed by the TensorHolder destructors.)
        output.SetTensorShape(output_shape);
        output.SetTensorStrides(output_strides);
    }

    // Fit src_rect into output_rect while preserving its aspect ratio.
    // Returns a rectangle (x/y left at 0) whose width/height are src_rect's
    // dimensions scaled by the smaller of the two axis ratios, rounded to
    // whole pixels. Returns an empty rectangle if src_rect has no area.
    Rect image_keep_aspect_scale(const Rect& src_rect, const Rect& output_rect) {
        Rect dst_rect{};

        // Guard against zero-sized sources (would divide by zero below).
        if (src_rect.width <= 0 || src_rect.height <= 0) {
            std::cerr << "错误：原始矩形宽高不能为0！" << std::endl;
            return dst_rect;
        }

        // Per-axis scale factors; the smaller one guarantees the scaled
        // rectangle fits inside output_rect on both axes.
        const float scale_w = (float)output_rect.width / src_rect.width;
        const float scale_h = (float)output_rect.height / src_rect.height;
        const float final_scale = std::min(scale_w, scale_h);

        dst_rect.width = src_rect.width * final_scale;
        dst_rect.height = src_rect.height * final_scale;

        // Round to whole pixels for display use.
        dst_rect.width = round(dst_rect.width);
        dst_rect.height = round(dst_rect.height);

        return dst_rect;
    }

    // New-interface implementation (L3): device-side path using the rivsaten
    // pipeline. `image->data[0]` must point to packed HWC U8 device memory.
    // If `p_tensor` has no usable shape yet, it is initialized to a CHW FP32
    // tensor sized from param->m_dstROI and device memory is allocated.
    // Returns 0 on success, a negative error code on invalid arguments/failure.
    int image_convert_to_L3(AVFrame *image, ConvertParam *param, RivsImageTensor *p_tensor)
    {
        if (!image || !param || !p_tensor) return -1;

        const int channel = 3;
        const int in_width = image->width;
        const int in_height = image->height;
        // NOTE(review): assumes rows are packed; AVFrame::linesize may include
        // padding — confirm upstream guarantees linesize[0] == width * 3.
        const int in_stride = in_width;

        const int src_w = param->m_srcROI.width;
        const int src_h = param->m_srcROI.height;
        if (src_w <= 0 || src_h <= 0) {
            return -4;
        }

        // Output is CHW. Only trust the tensor's dims when it carries a 3-D
        // shape: a default-constructed tensor has an empty m_shape, and the
        // previous unconditional m_shape[1]/[2] access was undefined behavior.
        int out_width = 0;
        int out_height = 0;
        if (p_tensor->m_shape.size() == 3) {
            out_height = p_tensor->m_shape[1];
            out_width = p_tensor->m_shape[2];
        }
        if (out_height == 0 || out_width == 0) {
            out_width = param->m_dstROI.width;
            out_height = param->m_dstROI.height;
            if (out_width <= 0 || out_height <= 0) return -2;

            p_tensor->m_data_type = RivsInference::DataType::TIF_FP32;
            p_tensor->m_data_size = out_height * out_width * channel;
            p_tensor->m_shape = {channel, out_height, out_width};
            // Allocate outside assert(): under NDEBUG the whole assert
            // expression (including the rivsMalloc call) is compiled out.
            auto aret = rivsMalloc(&p_tensor->m_data, p_tensor->m_data_size * sizeof(float));
            assert(rivsSuccess == aret);
            if (aret != rivsSuccess) return -6;
            p_tensor->m_own_data = true;
        }

        std::vector<int64_t> in_shape = {in_height, in_width, channel};
        std::vector<int64_t> in_strides = {in_stride * channel, channel, 1};

        std::vector<int64_t> out_shape = {channel, out_height, out_width};
        std::vector<int64_t> out_strides = {out_height * out_width, out_width, 1};

        uint8_t *input_dev = image->data[0];
        if (!input_dev) return -3;

        rivsatenTensor input(rivsatenSize_t(in_shape.data(), in_shape.size()),
                             rivsatenSize_t(in_strides.data(), in_strides.size()),
                             RIVSATEN_DATA_U8,
                             input_dev);
        rivsatenTensor output(rivsatenSize_t(out_shape.data(), out_shape.size()),
                              rivsatenSize_t(out_strides.data(), out_strides.size()),
                              RIVSATEN_DATA_FP32,
                              p_tensor->m_data);

        image_converto_internal(p_tensor->m_device_id, output, input, *param);
        return 0;
    }

     // New-interface implementation (L4): host-side OpenCV path for RGB24/BGR24
     // frames. Crops the source ROI, letterbox-resizes it into the output
     // canvas, converts to float (applying m_scaleAlpha when enabled), repacks
     // to planar CHW on the host, and uploads to p_tensor's device buffer.
     // Returns 0 on success, a negative error code otherwise.
     int image_convert_to_L4(AVFrame *image, ConvertParam *param, RivsImageTensor *p_tensor) {
        if (!image || !param || !p_tensor) return -1;

        const int channel = 3;
        const int in_width = image->width;
        const int in_height = image->height;

        const int src_w = param->m_srcROI.width;
        const int src_h = param->m_srcROI.height;
        if (src_w <= 0 || src_h <= 0) {
            return -4;
        }

        // Output is CHW. Only trust the tensor's dims when it carries a 3-D
        // shape (empty m_shape indexing was undefined behavior).
        int out_width = 0;
        int out_height = 0;
        if (p_tensor->m_shape.size() == 3) {
            out_height = p_tensor->m_shape[1];
            out_width = p_tensor->m_shape[2];
        }
        if (out_height == 0 || out_width == 0) {
            out_width = param->m_dstROI.width;
            out_height = param->m_dstROI.height;
            if (out_width <= 0 || out_height <= 0) return -2;
            // BUGFIX: initialize the tensor in place instead of
            // `*p_tensor = RivsImageTensor(...)` — assigning a temporary copied
            // its freshly-allocated device pointer, and the temporary's
            // destructor freed that pointer immediately, leaving m_data
            // dangling (use-after-free on the upload below). Device memory is
            // now allocated lazily right before the H2D copy.
            p_tensor->m_data_type = RivsInference::DataType::TIF_FP32;
            p_tensor->m_shape = {channel, out_height, out_width};
            p_tensor->m_data_size = out_height * out_width * channel;
        }

        // 1) Wrap AVFrame as a cv::Mat (host memory). Support RGB24/BGR24 only.
        cv::Mat src;
        // NOTE(review): isRGB is never consulted afterwards — the split below
        // always assumes BGR channel order, so RGB24 input ends up with R/B
        // swapped in the output planes. Confirm whether that is intended.
        bool isRGB = false;
        if (image->format == AV_PIX_FMT_BGR24) {
            src = cv::Mat(in_height, in_width, CV_8UC3, image->data[0], image->linesize[0]);
            isRGB = false;
        } else if (image->format == AV_PIX_FMT_RGB24) {
            src = cv::Mat(in_height, in_width, CV_8UC3, image->data[0], image->linesize[0]);
            isRGB = true;
        } else {
            // Other pixel formats would need swscale; not supported on this path.
            return -5;
        }

        // 2) Crop the source ROI and letterbox it into a zeroed output canvas.
        cv::Mat src_roi_img = src(cv::Rect(param->m_srcROI.x, param->m_srcROI.y, param->m_srcROI.width, param->m_srcROI.height));
        cv::Mat out_img = cv::Mat::zeros(cv::Size(out_width, out_height), CV_8UC3);
        cv::Mat src_roi_img_resized;
        if (param->m_srcROI.width != param->m_dstROI.width || param->m_srcROI.height != param->m_dstROI.height) {
            // Shrink by the larger axis ratio so the result fits inside dstROI
            // while preserving aspect ratio.
            float factor = std::max((float)param->m_srcROI.width / param->m_dstROI.width,
                                    (float)param->m_srcROI.height / param->m_dstROI.height);
            cv::resize(src_roi_img, src_roi_img_resized, cv::Size(param->m_srcROI.width / factor, param->m_srcROI.height / factor));
        } else {
            src_roi_img_resized = src_roi_img;
        }

        // NOTE(review): copyTo requires the destination ROI to match the source
        // size exactly; with an aspect-preserving resize the result can be
        // smaller than dstROI — confirm callers always pass a matching dstROI.
        src_roi_img_resized.copyTo(out_img(cv::Rect(param->m_dstROI.x, param->m_dstROI.y, param->m_dstROI.width, param->m_dstROI.height)));

        // 3) Convert to float (with optional scale) and repack BGR HWC ->
        //    RGB planar (CHW) directly into a host staging buffer.
        cv::Mat float_img;
        float alpha = 1.0f;
        if (param->m_scaleEnabled) {
            alpha = param->m_scaleAlpha;
        }
        out_img.convertTo(float_img, CV_32FC3, alpha);

        int planeSize = float_img.cols * float_img.rows;
        size_t hostDataSize = static_cast<size_t>(planeSize) * channel;
        std::vector<float> hostCHW(hostDataSize);
        auto* begin = hostCHW.data();
        // Plane views into hostCHW: split writes channel 0 (B) into the last
        // plane and channel 2 (R) into the first, yielding R,G,B plane order.
        cv::Mat b(cv::Size(out_img.cols, out_img.rows), CV_32FC1, begin);
        cv::Mat g(cv::Size(out_img.cols, out_img.rows), CV_32FC1, begin + planeSize);
        cv::Mat r(cv::Size(out_img.cols, out_img.rows), CV_32FC1, begin + (planeSize << 1));
        cv::Mat rgb[3] = {r, g, b};
        cv::split(float_img, rgb);

        // 4) Ensure the device buffer exists, then upload.
        size_t bytesNeeded = hostCHW.size() * sizeof(float);
        if (p_tensor->m_data == nullptr) {
            // Allocate outside assert(): under NDEBUG the assert expression
            // (including the rivsMalloc call) is compiled out entirely.
            auto aret = rivsMalloc(&p_tensor->m_data, bytesNeeded);
            assert(rivsSuccess == aret);
            if (aret != rivsSuccess) return -7;
            p_tensor->m_own_data = true;
        }
        auto mret = rivsMemcpy(p_tensor->m_data, hostCHW.data(), bytesNeeded, rivsMemcpyHostToDevice);
        if (mret != rivsSuccess) return -8;

        return 0;
     }

} // namespace rivscv