package m.ai.lab;

import java.nio.FloatBuffer;
import java.util.Collections;

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.Size;
import org.opencv.imgproc.Imgproc;

import ai.onnxruntime.OnnxTensor;
import ai.onnxruntime.OrtEnvironment;
import ai.onnxruntime.OrtSession;
import ai.onnxruntime.OrtSession.Result;
import ai.onnxruntime.OrtSession.SessionOptions;
import ai.onnxruntime.OrtSession.SessionOptions.OptLevel;

/**
 * Portrait segmenter: separates the person in an image from the background
 * using an ONNX segmentation model.
 * 
 * @author liuhao
 *
 */
public class Segmentation {
	/**
	 * Batch size of the model input tensor.
	 */
	private static final int MODEL_INPUT_NUM = 1;

	/**
	 * Width of the model input tensor, in pixels.
	 */
	private static final int MODEL_INPUT_WIDTH = 513;

	/**
	 * Height of the model input tensor, in pixels.
	 */
	private static final int MODEL_INPUT_HEIGHT = 513;

	/**
	 * Number of color channels expected by the model.
	 */
	private static final int MODEL_INPUT_CHANNELS = 3;

	/**
	 * Number of output classes (person / background).
	 */
	private static final int MODEL_CLASS_NUM = 2;

	/**
	 * Model input size (width x height) used when resizing frames.
	 */
	static final Size MODEL_INPUT_SHAPE = new Size(MODEL_INPUT_WIDTH, MODEL_INPUT_HEIGHT);

	/**
	 * Preprocesses an image into a normalized float buffer for the model.
	 * <p>
	 * The image is resized to {@link #MODEL_INPUT_SHAPE} and each channel value
	 * is scaled from 0..255 to 0..1. Values are written row-major (height
	 * before width), i.e. in NHWC order.
	 * 
	 * @param mat source image; must have at least {@link #MODEL_INPUT_CHANNELS}
	 *            channels. The source is not modified.
	 * @return a rewound buffer of N*H*W*C floats ready for tensor creation
	 */
	FloatBuffer preprocess(Mat mat) {
		FloatBuffer floatBuffer = FloatBuffer
				.allocate(MODEL_INPUT_NUM * MODEL_INPUT_WIDTH * MODEL_INPUT_HEIGHT * MODEL_INPUT_CHANNELS);
		// resize() allocates/reshapes the destination itself, so an empty Mat
		// is enough here (the original pre-sized it to the *source* size,
		// which was immediately discarded).
		Mat resized = new Mat();
		Imgproc.resize(mat, resized, MODEL_INPUT_SHAPE);
		for (int row = 0; row < MODEL_INPUT_HEIGHT; ++row) {
			for (int col = 0; col < MODEL_INPUT_WIDTH; ++col) {
				double[] pixel = resized.get(row, col);
				// Scale each channel from 0..255 to 0..1.
				floatBuffer.put((float) pixel[0] / 255f);
				floatBuffer.put((float) pixel[1] / 255f);
				floatBuffer.put((float) pixel[2] / 255f);
			}
		}
		floatBuffer.rewind();
		return floatBuffer;
	}

	/**
	 * Runs portrait segmentation on the given image.
	 * <p>
	 * Loads the ONNX model, runs inference, builds a binary mask (class 0 is
	 * painted white, class 1 black — see COLOR_MAP below), resizes the mask
	 * back to the input resolution and blends it onto the input with
	 * {@link Core#add}.
	 * 
	 * @param mat input image; if it has 4 channels the alpha channel is
	 *            dropped <em>in place</em> (the caller's Mat is modified)
	 * @return the input image, the class mask, and the blended composite
	 * @throws Exception if the model cannot be loaded or inference fails
	 */
	public SegmentationResult predict(Mat mat) throws Exception {
		// getEnvironment() returns the process-wide singleton; it must NOT be
		// closed here, or every later predict() call would fail.
		OrtEnvironment env = OrtEnvironment.getEnvironment();
		// The model expects exactly MODEL_INPUT_CHANNELS channels.
		if (mat.channels() == 4) {
			Imgproc.cvtColor(mat, mat, Imgproc.COLOR_RGBA2RGB);
		}
		// Options, session, tensor and result all wrap native memory and must
		// be released even when inference throws: try-with-resources.
		// NOTE(review): creating a session per call is expensive; consider
		// caching the OrtSession if predict() is invoked repeatedly.
		try (OrtSession.SessionOptions options = new OrtSession.SessionOptions()) {
			options.setOptimizationLevel(OptLevel.BASIC_OPT);
			try (OrtSession session = env.createSession("models/segmentation.onnx", options);
					// Shape is NHWC: height before width, matching the
					// row-major fill order of preprocess(). (The original
					// declared {N, W, H, C}, which only worked because the
					// model input is square.)
					OnnxTensor inputTensor = OnnxTensor.createTensor(env, preprocess(mat),
							new long[] { MODEL_INPUT_NUM, MODEL_INPUT_HEIGHT, MODEL_INPUT_WIDTH, MODEL_INPUT_CHANNELS });
					Result result = session.run(Collections.singletonMap("input_1", inputTensor))) {
				float[][][][] preds = (float[][][][]) result.get("bilinear_upsampling_2").get().getValue();
				// Drop the batch dimension: [H][W][MODEL_CLASS_NUM] scores.
				float[][][] prediction = preds[0];
				Mat mask = Mat.zeros(MODEL_INPUT_SHAPE, mat.type());
				// Per-class mask colors: class 0 -> white, class 1 -> black.
				double[][] COLOR_MAP = { { 255, 255, 255 }, { 0, 0, 0 } };
				for (int row = 0; row < MODEL_INPUT_HEIGHT; row++) {
					for (int col = 0; col < MODEL_INPUT_WIDTH; col++) {
						mask.put(row, col, COLOR_MAP[argmax(prediction[row][col])]);
					}
				}
				// Bring the mask back to the input resolution and blend.
				Imgproc.resize(mask, mask, new Size(mat.width(), mat.height()));
				Mat resultMat = new Mat();
				Core.add(mat, mask, resultMat);
				return new SegmentationResult(mat, mask, resultMat);
			}
		}
	}

	/**
	 * Index of the largest score; the first index wins on ties. Operates on
	 * primitives directly (the original boxed into Float[] per pixel).
	 * 
	 * @param scores non-empty per-class scores
	 * @return index of the maximum element
	 */
	private static int argmax(float[] scores) {
		int best = 0;
		for (int i = 1; i < scores.length; i++) {
			if (scores[i] > scores[best]) {
				best = i;
			}
		}
		return best;
	}
}
