/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

package win.smartown.android.library.certificateCamera.detection;

import android.content.res.AssetManager;
import android.graphics.Bitmap;
import android.graphics.Matrix;
import android.util.Log;

import org.tensorflow.Graph;
import org.tensorflow.contrib.android.TensorFlowInferenceInterface;

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.FloatBuffer;


/**
 * Wraps a TensorFlow MTCNN graph (loaded from assets) to decide whether a
 * captured ID-card bitmap contains a sufficiently sharp, roughly level face.
 *
 * <p>Not thread-safe: the pre-allocated output buffers and the static blur
 * metrics are shared mutable state; call {@link #detect} from one thread only.
 */
public class FaceModel {
    private static final String MODEL_FILE = "file:///android_asset/mtcnn.pb";
    // Only return this many results.
    private static final int MAX_RESULTS = 100;
    private static final int BYTE_SIZE_OF_FLOAT = 4;
    // Blur metrics produced by the most recent getBGRFromBMP() call
    // (Brenner gradient normalized by pixel count). brennerValueNewY is the
    // grayscale variant; its computation is currently disabled, so it is 0.
    private static float brennerValueNew;
    private static double brennerValueNewY;
    // Config values.
    private String inputName;

    // Pre-allocated buffers, reused across detect() calls.
    private FloatBuffer outputProbs;
    private FloatBuffer outputBoxes;
    private FloatBuffer outputLandmark;
    private String[] outputNames;

    private TensorFlowInferenceInterface inferenceInterface;
    private Bitmap bm;

    /**
     * Initializes a native TensorFlow session for classifying images.
     *
     * @param assetManager The asset manager to be used to load assets.
     * @return a ready-to-use {@link FaceModel}
     * @throws RuntimeException if the expected input/output nodes are absent
     *         from the loaded graph
     */
    public static FaceModel create(
            final AssetManager assetManager) {
        final FaceModel d = new FaceModel();

        d.inferenceInterface = new TensorFlowInferenceInterface(assetManager, MODEL_FILE);
        final Graph g = d.inferenceInterface.graph();

        d.inputName = "input";
        if (g.operation(d.inputName) == null)
            throw new RuntimeException("Failed to find input Node '" + d.inputName + "'");

        d.outputNames = new String[] {"prob", "landmarks", "box"};
        for (String outputName : d.outputNames) {
            if (g.operation(outputName) == null)
                throw new RuntimeException("Failed to find output Node '" + outputName + "'");
        }

        // Pre-allocate direct native-order buffers sized for MAX_RESULTS faces:
        // one prob per face, 10 landmark coords (5 points x 2) and 4 box coords.
        d.outputProbs = ByteBuffer.allocateDirect(MAX_RESULTS * BYTE_SIZE_OF_FLOAT)
                .order(ByteOrder.nativeOrder())
                .asFloatBuffer();
        d.outputLandmark = ByteBuffer.allocateDirect(MAX_RESULTS * BYTE_SIZE_OF_FLOAT * 2 * 5)
                .order(ByteOrder.nativeOrder())
                .asFloatBuffer();
        d.outputBoxes = ByteBuffer.allocateDirect(MAX_RESULTS * BYTE_SIZE_OF_FLOAT * 4)
                .order(ByteOrder.nativeOrder())
                .asFloatBuffer();

        return d;
    }

    public FaceModel() {}

    /**
     * Extracts the bitmap's pixels as a flat BGR float array and, as a side
     * effect, records a Brenner sharpness metric in {@link #brennerValueNew}.
     *
     * <p>The Brenner value is the mean over all pixels of the squared
     * horizontal gradient of the blue channel taken two pixels apart; higher
     * means sharper. The grayscale variant ({@link #brennerValueNewY}) is
     * disabled and therefore always 0.
     *
     * @param bmp source bitmap (ARGB pixels)
     * @return float array of length width*height*3 in B, G, R order per pixel
     */
    public static float[] getBGRFromBMP(Bitmap bmp) {
        // Sharpness accumulators, normalized by pixel count at the end.
        float brennerValue = 0;
        double brennerValueY = 0.0; // grayscale path disabled; stays 0
        int bw = bmp.getWidth(), bh = bmp.getHeight();
        Log.e("myLog", "BitmapWidth:" + bw + "BitmapHeight" + bh);
        int[] intValues = new int[bw * bh];
        float[] floatValues = new float[bw * bh * 3];
        bmp.getPixels(intValues, 0, bw, 0, 0, bw, bh);

        // Unpack ARGB ints into BGR floats.
        for (int i = 0; i < intValues.length; ++i) {
            int p = intValues[i];
            floatValues[i * 3] = p & 0xFF;             // B
            floatValues[i * 3 + 1] = (p >> 8) & 0xFF;  // G
            floatValues[i * 3 + 2] = (p >> 16) & 0xFF; // R

            if (i < intValues.length - 2 && i > 1) {
                // Squared horizontal gradient of the blue channel, two pixels
                // apart (Brenner focus measure). Accumulated only when freshly
                // computed — the original re-added the stale value for the
                // final two pixels.
                // NOTE: indexing is flat, so the "horizontal" neighbor wraps
                // across row boundaries at row edges.
                double sx = Math.pow(floatValues[i * 3] - floatValues[(i - 2) * 3], 2);
                brennerValue += sx;
            }
            // Grayscale gradient (Y = 0.3R + 0.59G + 0.11B) was considered
            // here but is intentionally disabled; brennerValueY stays 0.
        }
        brennerValue = brennerValue / intValues.length;   // normalize by image size
        brennerValueY = brennerValueY / intValues.length; // normalize by image size
        brennerValueNew = brennerValue;
        brennerValueNewY = brennerValueY;
        return floatValues;
    }

    /**
     * Runs blur rejection and MTCNN face detection on {@code result.bitmap},
     * writing the verdict back into the same object.
     *
     * <p>Pipeline: downscale the bitmap, compute the Brenner sharpness metric,
     * reject blurry frames, otherwise feed the BGR pixels to the MTCNN graph
     * and accept only when a face is detected with high probability and the
     * first two landmark coordinates are nearly level.
     *
     * @param result carries the input bitmap; on return {@code isSucess} is
     *               0 on success / 1 on failure and {@code status} is
     *               -1 (blurry) or 0 (face processed)
     * @return the same {@code result} instance, mutated in place
     */
    public ScanResult detect(ScanResult result) {
        Log.e("mylog1", "***************************** FaceModel start *****************************");
        int isRight = 1;         // success: 0, failure: 1
        int pictureStata = -1;   // ID-photo state: blurry: -1, normal: 0
        int brennerMR = 100;     // sharpness threshold
        float scaleDown = 0.3f;  // downscale ratio; OCR of ID cards tolerates a fairly large one
        float faceRaceMr = 0.7f; // face-probability threshold
        int scaleBitmap = 1;     // crop divisor; e.g. 2 keeps the top-left quarter

        Bitmap bitmap = result.bitmap;

        long time0, time1, time2, time3, time4, time10, time11;
        time0 = System.currentTimeMillis();

        // Downscale (and optionally crop) the bitmap before inference.
        Matrix matrix = new Matrix();
        matrix.setScale(scaleDown, scaleDown);
        // Divide in double so ceil actually rounds up when scaleBitmap > 1
        // (integer division made the original ceil a no-op).
        int bitWidth = (int) Math.ceil(bitmap.getWidth() / (double) scaleBitmap);
        int bitHeight = (int) Math.ceil(bitmap.getHeight() / (double) scaleBitmap);
        bm = Bitmap.createBitmap(bitmap, 0, 0, bitWidth, bitHeight, matrix, true);
        bitmap = bm;
        time1 = System.currentTimeMillis();

        int bw = bitmap.getWidth(), bh = bitmap.getHeight();
        // Extract BGR pixels; this also updates the sharpness metric.
        time10 = System.currentTimeMillis();
        float[] floatValues = getBGRFromBMP(bitmap);
        time11 = System.currentTimeMillis();
        float brennerValue = brennerValueNew;

        time2 = System.currentTimeMillis();

        // Reject the frame early if the Brenner gradient says it is too blurry.
        if (brennerValue <= brennerMR) {
            result.isSucess = isRight;
            result.status = pictureStata;
            Log.e("myLog", "brennerValue <= brennerMR isSuccess:" + result.isSucess + "brennerValue <= brennerMR status:" + result.status);
            return result;
        }

        outputProbs.clear();
        outputLandmark.clear();
        outputBoxes.clear();
        Log.e("mylog", "startModel");
        // Copy the input data into TensorFlow.
        inferenceInterface.feed(inputName, floatValues, bh, bw, 3);
        // Run the inference call.
        inferenceInterface.run(outputNames, false);
        // Copy the output Tensor back into the output array.
        inferenceInterface.fetch(outputNames[0], outputProbs);
        inferenceInterface.fetch(outputNames[1], outputLandmark);
        inferenceInterface.fetch(outputNames[2], outputBoxes);

        outputProbs.flip();
        outputLandmark.flip();
        outputBoxes.flip();
        Log.e("mylog", "endModel");

        time3 = System.currentTimeMillis();

        int len = outputProbs.remaining();
        int len2 = outputLandmark.remaining();
        int len3 = outputBoxes.remaining();
        Log.e("mylog", "len:" + len + "len2:" + len2 + "len3:" + len3);

        float outputProbs_new = 0;
        if (len > 0) outputProbs_new = outputProbs.get(len - 1);
        Log.e("mylog", "outputProbs_new = " + outputProbs_new);

        // Skip when nothing was returned; outputProbs_new is the face probability.
        if (outputProbs_new > faceRaceMr && len2 != 0 && len3 != 0) {
            Log.e("mylog", "get face picture success");

            // Landmark layout: points [0:4] are the five horizontal (column)
            // coordinates, [5:9] the vertical (row) coordinates.
            // NOTE(review): the two values read below are the first two
            // landmark floats; per the layout above those are horizontal
            // coordinates, so the "_y" names look misleading — confirm the
            // exported graph's landmark ordering before renaming.
            float left_eye_y = outputLandmark.get();
            float right_eye_y = outputLandmark.get();

            Log.e("mylog", "get face picture success of MODEL_FACE");
            if (Math.abs(left_eye_y - right_eye_y) < bh / 40) {
                // Face detected and roughly level.
                isRight = 0;
                pictureStata = 0;
            } else {
                // Face detected but tilted too much.
                isRight = 1;
                pictureStata = 0;
            }
        }

        // Reset buffers for the next detect() call.
        outputProbs.compact();
        outputLandmark.compact();
        outputBoxes.compact();

        result.isSucess = isRight;
        result.status = pictureStata;
        time4 = System.currentTimeMillis();
        Log.e("mylog", "cost0=" + (time1 - time0) + "; cost1=" + (time2 - time1) + "; cost2=" + (time3 - time2) + "; cost3=" + (time4 - time3));
        Log.e("mylog", "getBGRFromBMP cost:" + (time11 - time10));
        Log.e("MTC","isSucess:"+result.isSucess+result.isSucess + "; status = " + result.status+"isStart:"+result.isStart);

        return result;
    }

    /** @return the inference interface's accumulated timing statistics. */
    public String getStatString() {
        return inferenceInterface.getStatString();
    }

    /** Releases the native TensorFlow session. */
    public void close() {
        inferenceInterface.close();
    }

}
