/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

package win.smartown.android.library.faceEffects.detection;

import android.content.res.AssetManager;
import android.graphics.Bitmap;
import android.graphics.Matrix;
import android.util.Log;

import org.tensorflow.Graph;
import org.tensorflow.contrib.android.TensorFlowInferenceInterface;

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.FloatBuffer;
import java.util.Arrays;


public class MTCNN {
    private static final String MODEL_FILE = "file:///android_asset/mtcnn.pb";

    // Only return this many results.
    private static final int MAX_RESULTS = 100;
    private static final int BYTE_SIZE_OF_FLOAT = 4;

    // Name of the graph's input node.
    private String inputName;

    // Pre-allocated direct buffers, reused across detect() calls.
    private FloatBuffer outputProbs;
    private FloatBuffer outputBoxes;
    private FloatBuffer outputLandmark;
    private String[] outputNames;

    private TensorFlowInferenceInterface inferenceInterface;
    private Bitmap bm;

    // Detection mode: 0 = ID-card front (OCR), 1 = live face, 2 = ID-card back, 3 = willingness check.
    int model;
    public final static int MODEL_OCR = 0;
    public final static int MODEL_FACE = 1;
    public final static int MODEL_OCR_BACK = 2;
    public final static int MODEL_INTENT = 3;

    public final static int CMD_INTENT_START = 1000;
    public final static int CMD_INTENT_LOOKUP = 1001;   // willingness action: head up
    public final static int CMD_INTENT_LOOKDOWN = 1002; // willingness action: head down

    int intentType = CMD_INTENT_START; // current command driving the willingness state machine

    // Expected action sequence for the willingness check.
    public int[] willingAction = {CMD_INTENT_LOOKUP, CMD_INTENT_LOOKDOWN, CMD_INTENT_LOOKUP};
    // Completion flag (0/1) for each step of the sequence: frontal face, head-down, frontal again.
    public int[] willingActionResult = new int[3];

    /**
     * Initializes a native TensorFlow session for classifying images.
     *
     * @param assetManager The asset manager to be used to load assets.
     * @param model        Detection mode: MODEL_OCR, MODEL_FACE, MODEL_OCR_BACK or MODEL_INTENT.
     * @return a ready-to-use detector.
     * @throws RuntimeException if an expected graph node cannot be found.
     */
    public static MTCNN create(
            final AssetManager assetManager, int model) {
        final MTCNN d = new MTCNN();

        d.model = model;

        d.inferenceInterface = new TensorFlowInferenceInterface(assetManager, MODEL_FILE);

        final Graph g = d.inferenceInterface.graph();

        d.inputName = "input";
        if (g.operation(d.inputName) == null) {
            throw new RuntimeException("Failed to find input Node '" + d.inputName + "'");
        }

        d.outputNames = new String[] {"prob", "landmarks", "box"};
        for (String outputName : d.outputNames) {
            if (g.operation(outputName) == null) {
                throw new RuntimeException("Failed to find output Node '" + outputName + "'");
            }
        }

        // Pre-allocate buffers in native byte order for the fetch() calls.
        d.outputProbs = ByteBuffer.allocateDirect(MAX_RESULTS * BYTE_SIZE_OF_FLOAT)
                .order(ByteOrder.nativeOrder())
                .asFloatBuffer();

        // 5 landmark points per result, each with two coordinates.
        d.outputLandmark = ByteBuffer.allocateDirect(MAX_RESULTS * BYTE_SIZE_OF_FLOAT * 2 * 5)
                .order(ByteOrder.nativeOrder())
                .asFloatBuffer();

        // 4 box coordinates per result.
        d.outputBoxes = ByteBuffer.allocateDirect(MAX_RESULTS * BYTE_SIZE_OF_FLOAT * 4)
                .order(ByteOrder.nativeOrder())
                .asFloatBuffer();

        return d;
    }

    public MTCNN() {}

    /**
     * Converts a Bitmap into a flat BGR float array and appends a Brenner-gradient
     * sharpness score as the final element.
     *
     * @param bmp source bitmap.
     * @return array of length width*height*3 + 1 laid out as [B,G,R, B,G,R, ..., brennerValue],
     *         where brennerValue is the mean squared horizontal gradient (higher = sharper).
     */
    public static float[] getBGRFromBMP(Bitmap bmp) {
        // Sharpness metric, normalized by pixel count at the end.
        float brennerValue = 0;
        double sx; // squared horizontal gradient of the blue channel
        int bw = bmp.getWidth(), bh = bmp.getHeight();
        Log.e("myLog", "BitmapWidth:" + bw + "BitmapHeight" + bh);
        int[] intValues = new int[bw * bh];
        float[] floatValues = new float[bw * bh * 3 + 1];
        Log.e("mylog", "rgbstart");
        bmp.getPixels(intValues, 0, bw, 0, 0, bw, bh);
        // Unpack ARGB ints into BGR float triples.
        for (int i = 0; i < intValues.length; ++i) {
            int p = intValues[i];
            floatValues[i * 3] = p & 0xFF;             // B
            floatValues[i * 3 + 1] = (p >> 8) & 0xFF;  // G
            floatValues[i * 3 + 2] = (p >> 16) & 0xFF; // R
            // Brenner gradient: squared difference between blue values two pixels apart.
            // NOTE(review): the flat index crosses row boundaries at image edges.
            if (i < intValues.length - 2 && i > 1) {
                sx = Math.pow(floatValues[i * 3] - floatValues[(i - 2) * 3], 2);
                // Bug fix: accumulate only when a gradient was computed; previously the
                // stale value was re-added for the last two pixels of the image.
                brennerValue += sx;
            }
        }
        brennerValue = brennerValue / intValues.length; // normalize by image size
        floatValues[bw * bh * 3] = brennerValue;
        return floatValues;
    }

    /** Returns the largest of three ints. */
    static int rgb2hsv_max(int a, int b, int c) {
        return Math.max(a, Math.max(b, c));
    }

    /** Returns the smallest of three ints. */
    static int rgb2hsv_min(int a, int b, int c) {
        return Math.min(a, Math.min(b, c));
    }

    /**
     * Converts RGB (0-255) to an integer HSV triple.
     * H is in degrees [0, 360), or -1 for achromatic pixels (r == g == b).
     * S and V are unscaled: S = max - min, V = max (both in 0-255).
     */
    static int[] rgb2hsv(int r, int g, int b) {
        int imax, imin, diff, h, s, v;
        int[] HSV = new int[3];
        imax = rgb2hsv_max(r, g, b);
        imin = rgb2hsv_min(r, g, b);
        diff = imax - imin;
        v = imax;
        if (imax == 0) { s = 0; }
        else { s = diff; }

        if (diff != 0) {
            // Standard hue sextant selection based on the dominant channel.
            if (r == imax) { h = 60 * (g - b) / diff; }
            else if (g == imax) { h = 60 * (b - r) / diff + 120; }
            else { h = 60 * (r - g) / diff + 240; }
            if (h < 0) h = h + 360;
        } else {
            h = -1; // hue undefined for grays
        }
        HSV[0] = h; HSV[1] = s; HSV[2] = v;

        return HSV;
    }

    /**
     * Returns the angle in degrees at vertex B=(x2,y2) of the triangle
     * A=(x1,y1), B=(x2,y2), C=(x3,y3), computed via the law of cosines.
     *
     * @return the angle at B in whole degrees, or -1 if the three points are
     *         (nearly) collinear and form no valid triangle.
     */
    public static int calAngle(float x1, float x2, float x3, float y1, float y2, float y3) {
        double lenAB, lenBC, lenAC;

        double a = x1 - x2; double b = y1 - y2;
        lenAB = Math.sqrt(a * a + b * b);

        double c = x2 - x3; double d = y2 - y3;
        lenBC = Math.sqrt(c * c + d * d);

        double e = x1 - x3; double f = y1 - y3;
        lenAC = Math.sqrt(e * e + f * f);

        // Degenerate triangle: any side not shorter than the sum of the other two.
        if ((lenAB + lenBC <= lenAC) || (lenBC + lenAC <= lenAB) || (lenAB + lenAC <= lenBC)) {
            return -1;
        }

        // Law of cosines for the angle at B.
        double cosV = (lenAB * lenAB + lenBC * lenBC - lenAC * lenAC) / (2 * lenAB * lenBC);
        double radian = Math.acos(cosV);
        return (int) Math.toDegrees(radian);
    }

    /**
     * Counts pixels whose color falls into a "red" HSV range.
     *
     * @param floatValues BGR pixel array as produced by getBGRFromBMP (without the
     *                    trailing sharpness element), so R sits at index i*3+2.
     * @return number of red pixels found.
     */
    static int getRedNum(float[] floatValues) {
        int redNum = 0;
        for (int i = 0; i < floatValues.length / 3; ++i) {
            int[] HSV = rgb2hsv((int) floatValues[i * 3 + 2], (int) floatValues[i * 3 + 1], (int) floatValues[i * 3]);
            // Bug fix: the hue disjunction must be parenthesized. Previously '&&'
            // bound tighter than '||', so the saturation/value constraints applied
            // only to the 160-200 hue range and any pixel with hue in (0,34) counted.
            if (((HSV[0] > 0 && HSV[0] < 34) || (HSV[0] > 160 && HSV[0] < 200))
                    && (HSV[1] >= 90 && HSV[1] <= 200)
                    && (HSV[2] >= 100 && HSV[2] <= 255)) {
                redNum += 1;
            }
        }
        return redNum;
    }

    /**
     * Runs detection on {@code result.bitmap} according to the configured {@code model}
     * and fills {@code result.isSucess} (0 = success, 1 = failure) and {@code result.status}.
     *
     * Status codes: blurry -1, ok 0, too high 1, too low 2, too far left 3,
     * too far right 4, too small 5, too large 6, frontal face captured 7,
     * head down 8, head up 9.
     *
     * @param result carries the input bitmap and receives the detection outcome.
     * @return the same result instance, updated in place.
     */
    public ScanResult detect(ScanResult result) {

        Log.e("mylog1", "***************************** MTCNN start *****************************");
        int isRight = 1;         // 0 = success, 1 = failure
        int pictureStata = -1;   // see status codes in the javadoc above
        int brennerMR = 50;      // sharpness threshold
        float scaleDown = 0.4f;  // down-scale factor; ID-card OCR tolerates a larger image
        float faceRaceMr = 0.6f; // face probability threshold
        int scaleBitmap = 1;     // crop divisor: 2 keeps the top-left quarter of the bitmap
        // radianUp: angle above the nose from the eyes-nose triangle; radianUpNum: "head up" threshold.
        int radianUp, radianUpNum = 75;
        // radianDown: angle below the nose from the mouth-nose triangle; radianDownNum: "head down" threshold.
        int radianDown, radianDownNum = 85;
        if (model == 1) { // liveness: real face detection
            brennerMR = 100; scaleDown = 0.3f; faceRaceMr = 0.7f;
        } else if (model == 2) { // ID-card back (national-emblem side)
            // Device-tuned: Redmi 9 values; Xiaomi 11 used brennerMR = 100.
            brennerMR = 150; scaleDown = 0.3f; scaleBitmap = 2;
        } else if (model == 3) { // willingness detection
            brennerMR = 300; scaleDown = 0.1f; faceRaceMr = 0.6f;
            // Demand a sharper frame until the first (frontal-face) step is done.
            if (willingActionResult[0] == 0) brennerMR = 400;
        }

        Bitmap bitmap = result.bitmap;
        Log.e("mylog", "start this line ***************************************** model of " + model);

        long time0, time1, time2, time3, time4, time_model_2_0, time_model_2_1;
        time0 = System.currentTimeMillis();

        // Crop the top-left 1/scaleBitmap portion and shrink by scaleDown.
        Matrix matrix = new Matrix();
        matrix.setScale(scaleDown, scaleDown);
        // Bug fix: divide in floating point so Math.ceil has an effect; previously
        // int/int truncated before the ceil, making the ceil a no-op.
        int bitWidth = (int) Math.ceil(bitmap.getWidth() / (double) scaleBitmap);
        int bitHeight = (int) Math.ceil(bitmap.getHeight() / (double) scaleBitmap);
        bm = Bitmap.createBitmap(bitmap, 0, 0, bitWidth, bitHeight, matrix, true);
        bitmap = bm;
        time1 = System.currentTimeMillis();
        // Preprocess the image data from 0-255 int to normalized float based
        // on the provided parameters.
        int bw = bitmap.getWidth(), bh = bitmap.getHeight();
        float[] floatValuesOld = getBGRFromBMP(bitmap);
        // BGR pixel data (all but the trailing sharpness element).
        float[] floatValues = Arrays.copyOfRange(floatValuesOld, 0, floatValuesOld.length - 1);
        // Sharpness metric appended by getBGRFromBMP.
        float brennerValue = floatValuesOld[floatValuesOld.length - 1];
        Log.e("mylog", "brennerMR = " + brennerMR);
        Log.e("mylog", "brennerValue = " + brennerValue);

        Log.e("mylog", "rgbnd");
        time2 = System.currentTimeMillis();
        Log.e("mylog", "startModel");

        // The Brenner gradient reflects image sharpness; reject blurry frames early.
        if (brennerValue <= brennerMR) {
            Log.e("mylog1", "notlegi");
            // Not sharp enough: bail out with the failure defaults.
            result.isSucess = isRight;
            result.status = pictureStata;
            Log.e("myLog", "brennerValue <= brennerMR isSuccess:" + result.isSucess + "brennerValue <= brennerMR status:" + result.status);
            return result;
        }

        if (model == 2) {
            time_model_2_0 = System.currentTimeMillis();
            Log.e("mylog2", "floatValues : " + floatValues.length);
            Log.e("mylog", "get picture success of MODEL_OCR_BACK");

            // Back of the ID card: measure the red (emblem) area in the cropped
            // top-left quarter of the image; no neural network needed.
            int redNum = getRedNum(floatValues);
            Log.e("mylog2", "redNum : " + redNum);
            // NOTE(review): these bounds were tuned against the old getRedNum
            // precedence bug and may need re-tuning after the fix.
            if (redNum > 1000 && redNum < 28000) { pictureStata = 0; isRight = 0; }

            time_model_2_1 = System.currentTimeMillis();
            Log.e("myLog", "model_2_total_cost:" + (time_model_2_1 - time0));
            Log.e("myLog", "model_2_cost:" + (time_model_2_1 - time_model_2_0));
            result.isSucess = isRight;
            result.status = pictureStata;
            Log.e("myLog", "MODEL_OCR_BACK isSuccess:" + result.isSucess + "MODEL_OCR_BACK status:" + result.status);
            return result;
        }

        outputProbs.clear();
        outputLandmark.clear();
        outputBoxes.clear();

        // Copy the input data into TensorFlow.
        inferenceInterface.feed(inputName, floatValues, bh, bw, 3);
        // Run the inference call.
        inferenceInterface.run(outputNames, false);
        // Copy the output Tensor back into the output array.
        inferenceInterface.fetch(outputNames[0], outputProbs);
        inferenceInterface.fetch(outputNames[1], outputLandmark);
        inferenceInterface.fetch(outputNames[2], outputBoxes);

        outputProbs.flip();
        outputLandmark.flip();
        outputBoxes.flip();
        Log.e("mylog", "endModel");

        time3 = System.currentTimeMillis();

        int len = outputProbs.remaining();
        int len2 = outputLandmark.remaining();
        int len3 = outputBoxes.remaining();
        Log.e("mylog", "len:" + len + "len2:" + len2 + "len3:" + len3);

        float outputProbs_new = 0;
        // NOTE(review): the probability is taken from the LAST entry while the box
        // and landmarks below are read from the FIRST — confirm the graph emits a
        // single face, otherwise these refer to different detections.
        if (len > 0) outputProbs_new = outputProbs.get(len - 1);
        Log.e("mylog", "outputProbs_new = " + outputProbs_new);

        // Skip when nothing was detected; outputProbs_new is the face probability.
        if (outputProbs_new > faceRaceMr && len2 != 0 && len3 != 0) {
            Log.e("mylog", "get face picture success");

            // A face was found; read its bounding box.
            float top = outputBoxes.get();
            float left = outputBoxes.get();
            float bottom = outputBoxes.get();
            float right = outputBoxes.get();

            // Five landmark coordinates per axis. NOTE(review): the original comment
            // claimed column (x) coordinates come first, but the code reads the y
            // (row) coordinates first — kept as-is; verify against the graph.
            float left_eye_y = outputLandmark.get();
            float right_eye_y = outputLandmark.get();
            float noise_y = outputLandmark.get();
            float left_mouse_y = outputLandmark.get();
            float right_mouse_y = outputLandmark.get();

            float left_eye_x = outputLandmark.get();
            float right_eye_x = outputLandmark.get();
            float noise_x = outputLandmark.get();
            float left_mouse_x = outputLandmark.get();
            float right_mouse_x = outputLandmark.get();

            if (model == 0) {
                Log.e("mylog", "get face picture success of MODEL_OCR");
                // ID-card photo status: blurry -1, ok 0, high 1, low 2, left 3,
                // right 4, too small 5, too large 6.
                double boxLengh = Math.sqrt(Math.pow(top - bottom, 2) + Math.pow(left - right, 2));
                // Accept when the nose sits in the expected region, the eyes are
                // level, and the face box diagonal is within a plausible size band.
                if ((bw * 1 / 2 < noise_x && noise_x < bw * 8 / 9 && noise_y < bh * 13 / 20 && noise_y > 1 * bh / 3)
                        && (Math.abs(left_eye_y - right_eye_y) < bh / 40)
                        && (bh / 8 < boxLengh && boxLengh < bh / 3)) { pictureStata = 0; isRight = 0; }
                else if (noise_y < 1 * bh / 3) { pictureStata = 1; }
                else if (noise_y > bh * 13 / 20) { pictureStata = 2; }
                else if (noise_x > bw * 7 / 9) { pictureStata = 3; }
                else if (noise_x < bw * 5 / 8) { pictureStata = 4; }
                else if (boxLengh < bh / 8) { pictureStata = 5; }
                else { pictureStata = 6; isRight = 0; }
            }
            else if (model == 1) {
                Log.e("mylog", "get face picture success of MODEL_FACE");
                if (Math.abs(left_eye_y - right_eye_y) < bh / 40) {
                    // A face was recognized with level eyes.
                    isRight = 0;
                    pictureStata = 0;
                } else {
                    // A face was recognized but tilted too much.
                    isRight = 1;
                    pictureStata = 0;
                }
            }
            else if (model == 3) {
                Log.e("mylog", "get face picture success of MODEL_INTENT");
                // Angles of the nose-centered triangles used to estimate head pose.
                int radianLeft = calAngle(left_eye_x, noise_x, left_mouse_x, left_eye_y, noise_y, left_mouse_y);
                int radianRight = calAngle(right_eye_x, noise_x, right_mouse_x, right_eye_y, noise_y, right_mouse_y);
                radianUp = calAngle(left_eye_x, noise_x, right_eye_x, left_eye_y, noise_y, right_eye_y);
                radianDown = calAngle(left_mouse_x, noise_x, right_mouse_x, left_mouse_y, noise_y, right_mouse_y);
                Log.e("mylog", "radianUp = " + radianUp + ";  radianDown = " + radianDown+ ";  radianLeft = " + radianLeft+ ";  radianRight = " + radianRight);

                // A frontal face: left/right triangles roughly symmetric and the
                // upper angle inside its expected band.
                boolean positiveFace = (Math.abs(radianLeft - radianRight) < 30 && radianUp > radianUpNum - 5 && radianUp <= radianUpNum + 20);
                Log.e("MTCNN","intentType"+intentType);
                switch (intentType) {
                    case CMD_INTENT_START:
                        // A (re)start resets the completion flags; the result stays
                        // "failed" until the whole sequence finishes.
                        willingActionResult = new int[]{0, 0, 0};
                        isRight = 1;
                        Log.e("mylog", "CMD_INTENT_START willingActionResult[0] = " + willingActionResult[0]);
                        // Step 1: capture a frontal, non-nodding face.
                        if (willingActionResult[0] == 0 &&
                                positiveFace && radianDown < radianDownNum - 8) {
                            Log.e("mylog", "CMD_INTENT_START brennerMR = " + brennerMR);
                            Log.e("mylog", "CMD_INTENT_START brennerValue = " + brennerValue);
                            willingActionResult[0] = 1;
                            pictureStata = 7;
                        }
                        Log.e("mylog","CMD_INTENT_START pictureStata = "+pictureStata);
                        Log.e("mylog","uploadImage normal picture"+pictureStata);
                        break;
                    case CMD_INTENT_LOOKUP: // check whether this frame shows head-up
                        if (radianDown < radianDownNum - 5 && positiveFace) pictureStata = 9;
                        Log.e("mylog","CMD_INTENT_LOOKUP pictureStata = "+pictureStata);
                        break;
                    case CMD_INTENT_LOOKDOWN: // check whether this frame shows head-down
                        if (radianDown >= radianDownNum) pictureStata = 8;
                        Log.e("mylog","CMD_INTENT_LOOKDOWN pictureStata = "+pictureStata);
                        break;
                    default:
                        break;
                }
                // Step 2: after the frontal face, capture a head-down (nod) frame.
                if (radianDown >= radianDownNum && willingActionResult[0] == 1) willingActionResult[1] = 1;
                // Step 3: after frontal + nod, capture a frontal frame again.
                if (radianDown < radianDownNum && positiveFace && willingActionResult[1] == 1) willingActionResult[2] = 1;
                if (willingActionResult[2] == 1) { // whole willingness sequence completed
                    isRight = 0; Log.e("mylog", "willingActionResult is success");
                }
                Log.e("mylog", "willingActionResult:" + willingActionResult[0] + "," + willingActionResult[1] + "," + willingActionResult[2]);
            }
        }

        // Restore the buffers to fill mode for the next detect() call.
        outputProbs.compact();
        outputLandmark.compact();
        outputBoxes.compact();

        Log.e("mylog", "panduanend");
        result.isSucess = isRight;
        result.status = pictureStata;
        time4 = System.currentTimeMillis();

        Log.e("xiong", "totalCost:" + (time4 - time0));
        Log.e("myLog", " isSuccess = " + result.isSucess + "; status = " + result.status);

        return result;
    }

    /** Returns the inference engine's timing/statistics string. */
    public String getStatString() {
        return inferenceInterface.getStatString();
    }

    /** Releases the native TensorFlow session. */
    public void close() {
        inferenceInterface.close();
    }

    /**
     * Updates the willingness state machine command consumed by detect().
     *
     * @param cmd one of CMD_INTENT_START / CMD_INTENT_LOOKUP / CMD_INTENT_LOOKDOWN.
     * @return the command, echoed back.
     */
    public int onCommand(int cmd) {
        Log.e("MTCNN","cmd"+cmd);
        intentType = cmd;
        return cmd;
    }
}
