package com.ly.robustvideomatting.model;


import ai.onnxruntime.OnnxTensor;
import ai.onnxruntime.OrtEnvironment;
import ai.onnxruntime.OrtException;
import ai.onnxruntime.OrtSession;
import com.ly.utils.OnnxModelUtils;
import com.ly.utils.WHCUtil;
import lombok.SneakyThrows;
import org.opencv.core.*;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
import org.opencv.videoio.VideoCapture;
import org.opencv.videoio.VideoWriter;
import org.opencv.videoio.Videoio;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.FloatBuffer;
import java.util.*;

public class RobustVideoMattingModel {

    // Shared ONNX Runtime state.
    // NOTE(review): these are static yet assigned in the constructor, so
    // constructing a second instance silently replaces the session used by the
    // static matting() method; kept as-is to preserve the existing interface.
    private static OrtEnvironment env;
    private static OrtSession session;

    /**
     * Loads the RVM MobileNetV3 FP32 ONNX model from the classpath and
     * initializes the ONNX Runtime environment and session, then loads the
     * native OpenCV library.
     *
     * @throws RuntimeException if the model resource is missing or cannot be read
     */
    public RobustVideoMattingModel() {
        env = OrtEnvironment.getEnvironment();
        OrtSession.SessionOptions options = new OrtSession.SessionOptions();
        // Bug fix: the original never closed this stream on any path.
        try (InputStream is = RobustVideoMattingModel.class
                .getResourceAsStream("/onnx/rvm_mobilenetv3_fp32.onnx")) {
            if (is == null) {
                throw new RuntimeException("rvm_mobilenetv3_fp32.onnx not found");
            }
            inputStreamWriteSession(is, options);
        } catch (IOException e) {
            throw new RuntimeException("Failed to read rvm_mobilenetv3_fp32.onnx", e);
        }
    }

    /**
     * Buffers the whole model stream into memory, creates the ONNX session
     * from the bytes, then runs {@link #init()}.
     *
     * @param is      open stream positioned at the start of the .onnx file;
     *                the caller is responsible for closing it
     * @param options session options used to create the session
     * @throws RuntimeException if reading fails or the session cannot be created
     */
    private void inputStreamWriteSession(InputStream is, OrtSession.SessionOptions options) {
        try (ByteArrayOutputStream buffer = new ByteArrayOutputStream()) {
            int nRead;
            byte[] data = new byte[8192]; // larger copy buffer than the original 1 KiB
            while ((nRead = is.read(data, 0, data.length)) != -1) {
                buffer.write(data, 0, nRead);
            }
            // (flush() removed: it is a no-op on ByteArrayOutputStream)
            try {
                session = env.createSession(buffer.toByteArray(), options);
            } catch (OrtException e) {
                throw new RuntimeException("Could not create session", e);
            }
            init();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    /** Loads the bundled native OpenCV library (required before any Mat use). */
    private void init() {
        nu.pattern.OpenCV.loadLocally();
    }

    /**
     * Runs robust video matting over a hard-coded input video, frame by frame:
     * feeds each frame plus the four recurrent state tensors (r1i..r4i) and the
     * downsample ratio into the model, composes the frame with the predicted
     * alpha matte, and displays the result in a HighGui window.
     *
     * <p>Side effects per frame: writes "1.png" (raw input frame) and "2.png"
     * (composited output) to the working directory and prints the frame index.
     */
    @SneakyThrows
    public static void matting() {
        OnnxModelUtils.printModelInputsAndOutputs(session);
        VideoCapture videoCapture = new VideoCapture("C:\\Users\\ly\\Desktop\\resuouce\\matting\\robustvideomatting\\133895-758336556.mp4");
        Size frameSize = new Size(1920, 1080);
        // NOTE(review): the writer is opened but writer.write() is disabled
        // below; kept (and now released) to preserve existing behavior.
        VideoWriter writer = new VideoWriter("output.mp4",
                VideoWriter.fourcc('X', '2', '6', '4'),
                10,
                frameSize,
                true);
        Mat img = new Mat();
        HighGui.namedWindow("camera");
        // Recurrent state starts as a dummy 1x1x1x1 tensor; the model returns
        // properly shaped state tensors after the first frame.
        float[][][][] recInit = new float[1][1][1][1];
        OnnxTensor rec1 = OnnxTensor.createTensor(env, recInit);
        OnnxTensor rec2 = OnnxTensor.createTensor(env, recInit);
        OnnxTensor rec3 = OnnxTensor.createTensor(env, recInit);
        OnnxTensor rec4 = OnnxTensor.createTensor(env, recInit);
        // Downsample ratio fed to the model (0.25 = process at quarter resolution).
        OnnxTensor downsampleRatio = OnnxTensor.createTensor(env, new float[]{0.25f});
        int index = 0;
        try {
            while (videoCapture.read(img)) {
                index++;
                // Keep an untouched copy of the frame for compositing.
                Mat original = new Mat();
                img.copyTo(original);
                Imgcodecs.imwrite("1.png", img);
                // Normalize to float [0,1] and repack HWC -> CHW for the model.
                img.convertTo(img, CvType.CV_32FC1, 1. / 255);
                float[] whc = new float[3 * img.cols() * img.rows()];
                img.get(0, 0, whc);
                float[] chw = WHCUtil.whc2cwh(whc);
                long[] srcShape = new long[]{1, 3, img.rows(), img.cols()};
                OnnxTensor inputTensor = OnnxTensor.createTensor(env, FloatBuffer.wrap(chw), srcShape);
                Map<String, OnnxTensor> inputs = new HashMap<>();
                inputs.put("src", inputTensor);
                inputs.put("r1i", rec1);
                inputs.put("r2i", rec2);
                inputs.put("r3i", rec3);
                inputs.put("r4i", rec4);
                inputs.put("downsample_ratio", downsampleRatio);

                // Output order: fgr, pha, r1o, r2o, r3o, r4o.
                OrtSession.Result results = session.run(inputs);
                OnnxTensor pha = (OnnxTensor) results.get(1);
                // Close the previous recurrent state before adopting the new one.
                rec1.close();
                rec2.close();
                rec3.close();
                rec4.close();
                rec1 = (OnnxTensor) results.get(2);
                rec2 = (OnnxTensor) results.get(3);
                rec3 = (OnnxTensor) results.get(4);
                rec4 = (OnnxTensor) results.get(5);
                // NOTE(review): `results` is deliberately not closed because the
                // recurrent tensors it owns must survive into the next iteration;
                // consider copying the state out and closing `results` to stop the
                // per-frame fgr/pha leak — verify against the ORT Java ownership model.

                // Alpha matte as a flat float array (one value per pixel).
                float[] phaData = pha.getFloatBuffer().array();
                Mat phaMat = saveImage(phaData, 1920, 1080, "output1.png");
                Mat composited = matting(original, phaMat);
                Mat frame = prepareFrameForVideo(composited);
                Imgcodecs.imwrite("2.png", frame);
//                writer.write(frame);
                System.out.println(index);
                HighGui.imshow("camera", frame);
                HighGui.waitKey(1);
                // Bug fix: the per-frame input tensor was never closed.
                inputTensor.close();
            }
        } finally {
            // Bug fix: the original leaked the capture, writer and the
            // remaining tensors.
            videoCapture.release();
            writer.release();
            downsampleRatio.close();
            rec1.close();
            rec2.close();
            rec3.close();
            rec4.close();
        }
    }

    /**
     * Converts an arbitrary frame into the 3-channel 8-bit BGR (CV_8UC3)
     * layout expected by {@code VideoWriter}.
     *
     * <p>Bug fixes vs. the original: (1) the 4-channel branch converted
     * BGRA->BGR and then immediately overwrote that result with a depth-only
     * {@code convertTo} of the 4-channel source, yielding a CV_8UC4 Mat;
     * (2) the missing {@code default} branch returned an empty Mat for
     * 3-channel frames whose depth was not 8-bit.
     *
     * @param frame input frame with 1, 3 or 4 channels
     * @return a CV_8UC3 view of the frame (the same Mat if already compliant)
     */
    public static Mat prepareFrameForVideo(Mat frame) {
        if (frame.type() == CvType.CV_8UC3) {
            // Already in the required format.
            return frame;
        }
        Mat converted = new Mat();
        switch (frame.channels()) {
            case 1:
                // Single channel: replicate into three BGR channels.
                Imgproc.cvtColor(frame, converted, Imgproc.COLOR_GRAY2BGR);
                break;
            case 4:
                // Assume BGRA and drop the alpha channel.
                Imgproc.cvtColor(frame, converted, Imgproc.COLOR_BGRA2BGR);
                break;
            default:
                // Three channels but wrong depth: convert depth only.
                frame.convertTo(converted, CvType.CV_8UC3);
                break;
        }
        // Ensure 8-bit depth in case the source was e.g. float.
        if (converted.type() != CvType.CV_8UC3) {
            converted.convertTo(converted, CvType.CV_8UC3);
        }
        return converted;
    }

    /**
     * Writes interleaved 8-bit BGRA bytes to {@code filename}.
     * NOTE(review): currently unused by any visible caller.
     *
     * @param mergedData interleaved BGRA bytes, length = width * height * 4
     */
    private static void saveMergedImage(byte[] mergedData, int width, int height, String filename) {
        Mat mergedMat = new Mat(height, width, CvType.CV_8UC4);
        mergedMat.put(0, 0, mergedData);
        Imgcodecs.imwrite(filename, mergedMat);
    }

    /**
     * Composites the original image against the alpha matte, producing a
     * BGRA (CV_8UC4) image whose color channels are premultiplied by the
     * matte and whose alpha channel is the matte itself.
     *
     * @param originalImage 3- or 4-channel source frame
     * @param mask          alpha matte, same size as the source (any channel count)
     * @return CV_8UC4 composited image
     * @throws IllegalArgumentException if sizes differ or the source has an
     *                                  unsupported channel count
     */
    private static Mat matting(Mat originalImage, Mat mask) {
        if (!originalImage.size().equals(mask.size())) {
            throw new IllegalArgumentException("Original image and mask must have the same size");
        }

        // Collapse the mask to a single channel if necessary.
        Mat singleChannelMask = new Mat();
        if (mask.channels() != 1) {
            Imgproc.cvtColor(mask, singleChannelMask, Imgproc.COLOR_BGR2GRAY);
        } else {
            mask.copyTo(singleChannelMask);
        }

        Mat result = new Mat(originalImage.size(), CvType.CV_8UC4);

        // Promote the source to 4 channels (BGRA) if it is BGR.
        Mat originalImageRGBA = new Mat();
        if (originalImage.channels() == 3) {
            Imgproc.cvtColor(originalImage, originalImageRGBA, Imgproc.COLOR_BGR2BGRA);
        } else if (originalImage.channels() == 4) {
            originalImage.copyTo(originalImageRGBA);
        } else {
            throw new IllegalArgumentException("Original image must have 3 or 4 channels");
        }

        // Normalize the mask to float [0,1].
        Mat normalizedMask = new Mat();
        singleChannelMask.convertTo(normalizedMask, CvType.CV_32F, 1.0 / 255.0);

        List<Mat> channels = new ArrayList<>();
        Core.split(originalImageRGBA, channels);

        // Scale the B, G, R channels by the matte; alpha is replaced below.
        // NOTE(review): operands have mixed depths (CV_8U channel vs CV_32F
        // mask); this relies on the explicit dtype argument letting OpenCV
        // handle the conversion — confirm against the Core.multiply contract.
        for (int i = 0; i < 3; i++) {
            Mat temp = new Mat();
            Core.multiply(channels.get(i), normalizedMask, temp, 1, CvType.CV_8U);
            channels.set(i, temp);
        }

        // The matte itself becomes the alpha channel.
        Mat alphaChannel = new Mat();
        normalizedMask.convertTo(alphaChannel, CvType.CV_8U, 255);
        channels.set(3, alphaChannel);

        Core.merge(channels, result);
        return result;
    }

    /**
     * Converts a flat float array in [0,1] (one value per pixel) into an
     * 8-bit single-channel Mat.
     *
     * <p>NOTE(review): the {@code filename} parameter is currently unused (the
     * imwrite call is disabled) and the resize is an identity operation when
     * the data already matches width x height; both kept for interface
     * compatibility.
     *
     * @param data   row-major float values in [0,1], length = width * height
     * @return CV_8UC1 Mat scaled to the 0-255 range
     */
    public static Mat saveImage(float[] data, int width, int height, String filename) {
        Mat mat = new Mat(height, width, CvType.CV_32FC1);
        mat.put(0, 0, data);

        // Scale [0,1] floats to the 8-bit range.
        Mat mat8Bit = new Mat();
        mat.convertTo(mat8Bit, CvType.CV_8UC1, 255.0);

        Mat resizedImage = new Mat();
        Imgproc.resize(mat8Bit, resizedImage, new Size(width, height));

//        Imgcodecs.imwrite(filename, resizedImage);
        return resizedImage;
    }

    /** Entry point: loads the model, then runs the matting loop. */
    public static void main(String[] args) {
        RobustVideoMattingModel robustVideoMattingModel = new RobustVideoMattingModel();
        matting();
    }
}
