package com.ly.matting.model;

import ai.onnxruntime.OnnxTensor;
import ai.onnxruntime.OrtEnvironment;
import ai.onnxruntime.OrtException;
import ai.onnxruntime.OrtSession;
import com.ly.robustvideomatting.model.RobustVideoMattingModel;
import com.ly.utils.OnnxModelUtils;
import com.ly.utils.WHCUtil;
import lombok.SneakyThrows;
import org.opencv.core.*;
import org.opencv.imgproc.Imgproc;
import org.springframework.stereotype.Component;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.FloatBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;


@Component
public class PortraitMattingService {

    /** Process-wide ONNX Runtime environment (singleton per JVM). */
    private static OrtEnvironment env;
    /**
     * Session for the RobustVideoMatting MobileNetV3 model.
     * NOTE(review): static but (re)assigned in the instance constructor — creating a
     * second service instance replaces the session without closing the old one.
     * Kept as-is for interface compatibility; consider making it an instance field.
     */
    private static OrtSession session;

    /**
     * Loads the bundled RVM ONNX model from the classpath and initializes the
     * ONNX Runtime session plus the native OpenCV library.
     *
     * @throws RuntimeException if the model resource is missing or the session
     *                          cannot be created
     */
    public PortraitMattingService() {
        env = OrtEnvironment.getEnvironment();
        OrtSession.SessionOptions options = new OrtSession.SessionOptions();
        InputStream is = RobustVideoMattingModel.class.getResourceAsStream("/onnx/rvm_mobilenetv3_fp32.onnx");
        if (is == null) {
            throw new RuntimeException("rvm_mobilenetv3_fp32.onnx not found");
        }
        inputStreamWriteSession(is, options);
    }

    /**
     * Reads the whole model stream into memory and creates the ONNX session.
     * The stream is closed here (previously it was leaked).
     *
     * @param is      model bytes; closed by this method
     * @param options session options to use
     */
    private void inputStreamWriteSession(InputStream is, OrtSession.SessionOptions options) {
        // try-with-resources also closes the model InputStream — the original leaked it.
        try (InputStream in = is; ByteArrayOutputStream buffer = new ByteArrayOutputStream()) {
            int nRead;
            byte[] data = new byte[1024];
            while ((nRead = in.read(data, 0, data.length)) != -1) {
                buffer.write(data, 0, nRead);
            }
            buffer.flush();
            // Create the ONNX session from the in-memory model bytes.
            try {
                session = env.createSession(buffer.toByteArray(), options);
                // Log the model's declared inputs and outputs for diagnostics.
                OnnxModelUtils.printModelInputsAndOutputs(session);
            } catch (OrtException e) {
                throw new RuntimeException("Could not create session", e);
            }
            init();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    /** Loads the packaged native OpenCV binary for this platform. */
    private void init() {
        nu.pattern.OpenCV.loadLocally();
    }

    /**
     * Runs one frame through the matting model and composites the extracted
     * foreground onto {@code backgroundMat}.
     *
     * <p>The recurrent-state inputs (r1i..r4i) are reset to zeros on every call,
     * so each frame is matted independently (no temporal memory).
     *
     * @param originalMat   source frame (BGR or BGRA); not modified
     * @param backgroundMat replacement background, same size as {@code originalMat}
     * @param size          output size; the composite is resized to this
     * @return a new 4-channel Mat (caller owns and must release it)
     */
    @SneakyThrows
    public Mat portraitMatting(Mat originalMat, Mat backgroundMat, Size size) {
        Mat src = new Mat();
        originalMat.copyTo(src);
        OnnxTensor rec1 = null, rec2 = null, rec3 = null, rec4 = null, downsampleRatio = null, inputTensor = null;
        OrtSession.Result results = null;
        Mat phaMat = null;
        try {
            // Zero-initialized recurrent state: process this frame independently.
            float[][][][] recInit = new float[1][1][1][1];
            rec1 = OnnxTensor.createTensor(env, recInit);
            rec2 = OnnxTensor.createTensor(env, recInit);
            rec3 = OnnxTensor.createTensor(env, recInit);
            rec4 = OnnxTensor.createTensor(env, recInit);

            // Internal downsample ratio used by the RVM model.
            float[] downsampleRatioArr = new float[]{0.25f};
            downsampleRatio = OnnxTensor.createTensor(env, downsampleRatioArr);
            FloatBuffer inputBuffer = getInputFloatBuffer(originalMat);
            inputTensor = OnnxTensor.createTensor(env, inputBuffer, new long[]{1, 3, originalMat.rows(), originalMat.cols()});
            Map<String, OnnxTensor> inputs = new HashMap<>();
            inputs.put("src", inputTensor);
            inputs.put("r1i", rec1);
            inputs.put("r2i", rec2);
            inputs.put("r3i", rec3);
            inputs.put("r4i", rec4);
            inputs.put("downsample_ratio", downsampleRatio);
            results = session.run(inputs);
            // Output index 1 is the alpha matte (pha); closed together with `results`.
            OnnxTensor pha = (OnnxTensor) results.get(1);
            float[] phaData = pha.getFloatBuffer().array();
            // FIX: use the actual frame dimensions — the original hard-coded 1920x1080,
            // which broke (or silently corrupted output) for any other resolution.
            phaMat = floatToMat(phaData, originalMat.cols(), originalMat.rows());
            Mat composed = matting(src, phaMat, backgroundMat);
            Imgproc.resize(composed, composed, size);
            return composed;
        } finally {
            // Release every native resource created in this call.
            src.release();
            if (phaMat != null) phaMat.release();
            if (rec1 != null) rec1.close();
            if (rec2 != null) rec2.close();
            if (rec3 != null) rec3.close();
            if (rec4 != null) rec4.close();
            if (downsampleRatio != null) downsampleRatio.close();
            if (inputTensor != null) inputTensor.close();
            if (results != null) results.close();
        }
    }

    /**
     * Converts a frame to a normalized CHW float buffer for the model.
     *
     * <p>FIX: the original converted {@code originalMat} to CV_32F in place,
     * destructively mutating the caller's frame. A temporary Mat is used instead.
     *
     * @param originalMat 3-channel 8-bit frame; not modified
     * @return float buffer in CHW order, values scaled to [0, 1]
     */
    private static FloatBuffer getInputFloatBuffer(Mat originalMat) {
        Mat floatMat = new Mat();
        try {
            originalMat.convertTo(floatMat, CvType.CV_32FC1, 1. / 255);
            float[] whc = new float[3 * originalMat.cols() * originalMat.rows()];
            floatMat.get(0, 0, whc);
            // Model expects channel-first (CHW) layout.
            float[] chw = WHCUtil.whc2cwh(whc);
            return FloatBuffer.wrap(chw);
        } finally {
            floatMat.release();
        }
    }

    /**
     * Alpha-blends {@code originalImage} over {@code background} using
     * {@code mask} as the per-pixel alpha, producing a 4-channel result with an
     * opaque alpha channel.
     *
     * @param originalImage foreground frame, 3 or 4 channels
     * @param mask          8-bit single-channel alpha matte, same size
     * @param background    background image, 3 or 4 channels, same size
     * @return new BGRA Mat (caller owns and must release it)
     * @throws IllegalArgumentException on size or channel-count mismatch
     */
    private static Mat matting(Mat originalImage, Mat mask, Mat background) {
        Mat result = null;
        Mat originalImageRGBA = new Mat();
        Mat backgroundRGBA = new Mat();
        Mat maskFloat = new Mat();
        List<Mat> originalChannels = new ArrayList<>();
        List<Mat> backgroundChannels = new ArrayList<>();

        try {
            // All three inputs must share the same dimensions.
            if (!originalImage.size().equals(mask.size()) || !originalImage.size().equals(background.size())) {
                throw new IllegalArgumentException("Original image, mask, and background must have the same size");
            }

            // Normalize the foreground to 4 channels.
            if (originalImage.channels() == 3) {
                Imgproc.cvtColor(originalImage, originalImageRGBA, Imgproc.COLOR_BGR2BGRA);
            } else if (originalImage.channels() == 4) {
                originalImage.copyTo(originalImageRGBA);
            } else {
                throw new IllegalArgumentException("Original image must have 3 or 4 channels");
            }

            // Normalize the background to 4 channels.
            if (background.channels() == 3) {
                Imgproc.cvtColor(background, backgroundRGBA, Imgproc.COLOR_BGR2BGRA);
            } else if (background.channels() == 4) {
                background.copyTo(backgroundRGBA);
            } else {
                throw new IllegalArgumentException("Background image must have 3 or 4 channels");
            }

            // Scale the mask to [0, 1] floats for blending arithmetic.
            mask.convertTo(maskFloat, CvType.CV_32F, 1.0 / 255.0);

            Core.split(originalImageRGBA, originalChannels);
            Core.split(backgroundRGBA, backgroundChannels);

            // Blend each BGR channel: out = fg * alpha + bg * (1 - alpha).
            for (int i = 0; i < 3; i++) {
                Mat foreground = new Mat();
                Mat backgroundPart = new Mat();
                Mat channel = new Mat();
                Mat inverseMask = new Mat();

                try {
                    originalChannels.get(i).convertTo(channel, CvType.CV_32F);
                    Core.multiply(channel, maskFloat, foreground);

                    Core.subtract(Mat.ones(maskFloat.size(), CvType.CV_32F), maskFloat, inverseMask);
                    backgroundChannels.get(i).convertTo(channel, CvType.CV_32F);
                    Core.multiply(channel, inverseMask, backgroundPart);

                    Core.add(foreground, backgroundPart, foreground);
                    foreground.convertTo(originalChannels.get(i), CvType.CV_8U);
                } finally {
                    foreground.release();
                    backgroundPart.release();
                    channel.release();
                    inverseMask.release();
                }
            }

            // Fully opaque alpha channel for the output.
            Mat alphaChannel = new Mat(mask.size(), CvType.CV_8U, new Scalar(255));
            // FIX: release the channel Mat displaced by set() — the original leaked it.
            Mat displacedAlpha = originalChannels.set(3, alphaChannel);
            displacedAlpha.release();

            result = new Mat();
            Core.merge(originalChannels, result);

        } finally {
            // Release all intermediates (merge copies pixel data, so this is safe).
            originalImageRGBA.release();
            backgroundRGBA.release();
            maskFloat.release();
            for (Mat channel : originalChannels) {
                channel.release();
            }
            for (Mat channel : backgroundChannels) {
                channel.release();
            }
        }

        return result;
    }

    /**
     * Wraps a float alpha matte (values in [0, 1]) into an 8-bit single-channel Mat.
     *
     * <p>FIX: the original leaked the intermediate 8-bit Mat and then resized it
     * to its own dimensions (a no-op); both removed.
     *
     * @param data   row-major float pixels, length {@code width * height}
     * @param width  matte width in pixels
     * @param height matte height in pixels
     * @return new CV_8UC1 Mat (caller owns and must release it)
     */
    public static Mat floatToMat(float[] data, int width, int height) {
        Mat mat = new Mat(height, width, CvType.CV_32FC1);
        try {
            mat.put(0, 0, data);
            Mat mat8Bit = new Mat();
            // Scale [0, 1] floats to [0, 255] bytes.
            mat.convertTo(mat8Bit, CvType.CV_8UC1, 255.0);
            return mat8Bit;
        } finally {
            mat.release();
        }
    }

    /** Ad-hoc smoke test: constructs the service and times model/session setup. */
    public static void main(String[] args) {
        PortraitMattingService service = new PortraitMattingService();
        long start = System.currentTimeMillis();
        Mat greenImage = new Mat(1080, 1920, CvType.CV_8UC3, new Scalar(0, 255, 0));
        // Example: service.portraitMatting(Imgcodecs.imread("1.png"), greenImage, new Size(1920, 1080));
        greenImage.release();
        System.out.println(System.currentTimeMillis() - start);
    }
}
