package com.awo.facedetect.analyzer;

import android.annotation.SuppressLint;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Matrix;
import android.graphics.Paint;
import android.graphics.Rect;
import android.media.Image;
import android.util.Log;

import androidx.annotation.NonNull;
import androidx.camera.core.ImageAnalysis;
import androidx.camera.core.ImageProxy;
import androidx.camera.core.internal.utils.ImageUtil;
import androidx.camera.view.PreviewView;

import com.awo.facedetect.activity.FaceDetectActivity;
import com.awo.facedetect.util.FileUtil;
import com.awo.facedetect.widget.FaceDrawInfo;

import org.pytorch.IValue;
import org.pytorch.Module;
import org.pytorch.Tensor;
import org.pytorch.torchvision.TensorImageUtils;

import java.io.IOException;
import java.util.Arrays;


/**
 * Video-frame analyzer for face detection.
 * analyze() processes the most recent camera frame: it decodes the frame to a
 * Bitmap, runs a PyTorch face-detection model on it, and pushes the resulting
 * face rectangle to the hosting activity's overlay UI.
 */
public class FaceDetectAnalyzer implements ImageAnalysis.Analyzer {
    private static final String TAG = "FaceDetectAnalyzer";

    // for yolov5 model, no need to apply MEAN and STD
    // NOTE(review): these two constants are never read in this class.
    final static float[] MEAN_RGB = new float[] {116.0f, 117.0f, 111.0f};
    final static float[] STD_RGB = new float[] {1.0f, 1.0f, 1.0f};

    // model input image size
    // NOTE(review): also unused here — resizing/padding is delegated to
    // FaceUtils.PreProcessing(), which presumably targets 640x640 (see below).
    final static int mInputWidth = 640;
    final static int mInputHeight = 640;

    // Hosting activity: used for asset access and for pushing UI updates.
    private FaceDetectActivity context;

    // PyTorch model, loaded lazily on the first analyzed frame.
    // NOTE(review): the lazy init below is unsynchronized — safe only if
    // CameraX delivers frames on a single executor thread; confirm the
    // analyzer is bound with a single-threaded executor.
    Module module;

    public FaceDetectAnalyzer() { }

    public FaceDetectAnalyzer(FaceDetectActivity context) {
        this.context = context;
    }

    /**
     * Analyzes one camera frame end-to-end: JPEG-decode, preprocess, model
     * forward pass, anchor-based box decode of the top detection, and a UI
     * update on the activity.
     *
     * Must close {@code imageProxy} before returning so CameraX can deliver
     * the next frame. NOTE(review): the early {@code return} in the catch
     * block below does NOT close the proxy, which can stall the analysis
     * pipeline — confirm and fix.
     *
     * @param imageProxy the latest frame from CameraX (never null)
     */
    @SuppressLint("RestrictedApi")
    @Override
    public void analyze(@NonNull ImageProxy imageProxy) {
        // Initial face-box values, in the attribute order expected by
        // FaceDrawInfo. NOTE(review): `result` is populated later but only
        // logged — the FaceDrawInfo built at the end ignores it (see below).
        int[] result = new int[]{0, 0, 16, 16};
        // Path of the model file extracted from assets.
        String modelPath;
        Bitmap jbitmap;
        byte[] bytes;
        try {
//            jbitmap = BitmapFactory.decodeStream(context.getAssets().open("me.jpg"));
            // Load the model, ensuring it is loaded only once.
            modelPath = FileUtil.assetFilePath(context, "mbv2.pt");
            module = module == null ? Module.load(modelPath) : module;

            // Convert the frame to JPEG bytes, then decode to a Bitmap.
            // Rotation to match the preview orientation is currently disabled
            // (commented out below).
            bytes = ImageUtil.imageToJpegByteArray(imageProxy);
            jbitmap = BitmapFactory.decodeByteArray(bytes, 0, bytes.length);
//            Matrix matrix = new Matrix();
//            matrix.postRotate(90.0f);
//            jbitmap = Bitmap.createBitmap(jbitmap, 0, 0, jbitmap.getWidth(), jbitmap.getHeight(), matrix, true);
        } catch (IOException | ImageUtil.CodecFailedException e) {
            e.printStackTrace();
            // Bail out if model loading or frame decoding failed.
            // NOTE(review): imageProxy is not closed on this path.
            return;
        }
        //************************* resize and padding to 640*640 *********************//
        Bitmap mergebitmap= FaceUtils.PreProcessing(jbitmap);

        // preparing input tensor
//    float[] face_mean = new float[]{104.0f, 117.0f, 123.0f};
        float[] face_mean = new float[]{116.0f, 117.0f, 111.0f};   //offset to {104.0f, 117.0f, 123.0f}
        float[] face_std = new float[]{1.0f, 1.0f, 1.0f};
        final Tensor inputTensor = FaceUtils.bitmapToFloat32Tensor(mergebitmap,
                face_mean, face_std);

        // NOTE(review): debug print of the raw tensor size — consider Log.d.
        System.out.println(inputTensor.getDataAsFloatArray().length);
        // Forward pass. The model returns a tuple; element 0 is read below as
        // box regressions and element 1 as class scores.
        final IValue[] outputTensor = module.forward(IValue.from(inputTensor)).toTuple();

        //*************************** bbox ******************************//
        // Per-anchor box regression values, flattened 4 floats per anchor.
        IValue bbox = outputTensor[0];
        Tensor boxt = bbox.toTensor();
        float[] facebox = boxt.getDataAsFloatArray();

        // Per-anchor class scores, flattened 2 floats per anchor; index
        // 2*i+1 is read below as the face score — TODO confirm the layout is
        // [background, face] per anchor.
        IValue cls = outputTensor[1];
        Tensor clst = cls.toTensor();
        float[] facecls = clst.getDataAsFloatArray();

        int imw = mergebitmap.getWidth();
        int imh = mergebitmap.getHeight();

        // Feature-map sizes for the three detection strides (16, 32, 64).
        double fmw1 = Math.ceil(((float) imw) / 16.0f );
        double fmh1 = Math.ceil(((float) imh) / 16.0f );
        double fmw2 = Math.ceil(((float) imw) / 32.0f );
        double fmh2 = Math.ceil(((float) imh) / 32.0f );
        double fmw3 = Math.ceil(((float) imw) / 64.0f );
        double fmh3 = Math.ceil(((float) imh) / 64.0f );

        // Total anchor count: 2 anchors per feature-map cell over all strides.
        int totalnum = 2*(((int)fmh1)*((int)fmw1)+((int)fmh2)*((int)fmw2)+((int)fmh3)*((int)fmw3));
        float maxcls = 0.0f;
        // NOTE(review): maxx/maxy/maxw/maxh are declared here but NEVER
        // assigned anywhere — they stay 0.0f, so the box decode further down
        // degenerates to the raw anchor (exp(0) == 1). They presumably should
        // track the regression values of the best anchor (fbbox[maxidx]).
        float maxx = 0.0f;
        float maxy = 0.0f;
        float maxw = 0.0f;
        float maxh = 0.0f;
        float[] faceconf = new float[totalnum];     // confidences above threshold, in scan order
        int[] faceidx = new int[totalnum];          // anchor indices matching faceconf
        int clsnum = 0;                             // number of detections above threshold
        int maxidx = 0;                             // anchor index of the highest confidence
        facebbox[] fbbox = new facebbox[totalnum];  // raw regression values per anchor
        // Scan all anchors: collect candidates with confidence > 0.2 and
        // track the overall maximum.
        for (int i = 0; i < facebox.length; i = i+4) {
            int clsidx = (int) (i/4);
            float clsconf = facecls[2 *clsidx+1];
            facebbox boxtmp = new facebbox();
            boxtmp.score = clsconf;
            boxtmp.x1 = facebox[i];
            boxtmp.y1 = facebox[i + 1];
            boxtmp.x2 = facebox[i + 2];
            boxtmp.y2 = facebox[i + 3];
            fbbox[clsidx] = boxtmp;
            if(clsconf > 0.2){
                faceconf[clsnum] = clsconf;
                faceidx[clsnum] = clsidx;
                clsnum +=1;
            }
            if (clsconf > maxcls) {
                maxcls = clsconf;
                maxidx = clsidx;
            }
        }
        System.out.println("MAX :"+maxcls);
        System.out.println("MAX idx:"+maxidx);
//
//    // ************************************* PriorBox ***********************************//
        // Anchor (prior) boxes for this input size, indexed like the outputs.
        facebbox[] Anchors = FaceUtils.getAnchors(imw,imh);

        // No detection above threshold: clear the overlay and stop.
        if (clsnum < 1) {
            Log.d(TAG, "no face...");
            context.updateFaceRectUIByFDI(null);
            imageProxy.close();
            return;
        } else {
            Log.d(TAG, "has face: " + clsnum);
        }

//    //***************************** decode bbox from anchor and pred *********************************//
//     ************************************ Sort candidates by confidence, descending **************************************** //
        // NOTE(review): PredConfOr is never written or read below.
        float[] PredConfOr = new float[clsnum];
        int[] PredBoxX = new int[clsnum];
        int[] PredBoxY = new int[clsnum];
        int[] PredBoxW = new int[clsnum];
        int[] PredBoxH = new int[clsnum];

        // Debug canvas: boxes are drawn into a copy of the frame, but
        // NOTE(review): bitmap2 is never displayed or saved afterwards.
        Bitmap bitmap2 = jbitmap.copy(Bitmap.Config.ARGB_8888, true);

        Canvas canvas = new Canvas(bitmap2);
        Paint paint = new Paint();
        paint.setColor(Color.RED);
        paint.setStyle(Paint.Style.STROKE);
        paint.setStrokeWidth(3);

        // Bubble-sort faceconf (descending) with faceidx kept in step.
        for(int k=0;k<clsnum;k++) {
            for (int j = 0; j < clsnum - k - 1; j++) {
                if (faceconf[j] < faceconf[j + 1]) {
                    float tmp = faceconf[j];
                    faceconf[j] = faceconf[j + 1];
                    faceconf[j + 1] = tmp;
                    int idxtmp = faceidx[j];
                    faceidx[j] = faceidx[j+1];
                    faceidx[j+1] = idxtmp;

                }
            }
        }

        // Scale/offset factors mapping the model image to the preview view.
        // NOTE(review): kx is computed but never used; ky truncates to int
        // (probably intended to be float like kx); dx divides by 4 while dy
        // divides by 2 — verify these against the preview letterboxing.
        int[] preView = context.getWeightAndHeightOfView();
        int preViewWidth = preView[0];
        int preViewHeight = preView[1];
        float kx = (float) (1.0 * preViewWidth / mergebitmap.getWidth());
        int ky = (int) (1.0 * preViewHeight / mergebitmap.getHeight());
        int dx = (preViewWidth - mergebitmap.getWidth()) / 4;
        int dy = (preViewHeight - mergebitmap.getHeight()) / 2;

        // Decode only the single highest-confidence detection (k < 1).
        for(int k=0;k<1;k++) {
            int idxtmp = faceidx[k];
            // Anchor center (ax, ay) and size (aw, ah) in normalized coords.
            double ax = Anchors[faceidx[k]].x1;
            double ay = Anchors[faceidx[k]].y1;
            double aw = Anchors[faceidx[k]].x2;
            double ah = Anchors[faceidx[k]].y2;

            // Standard SSD-style decode — but since maxx/maxy/maxw/maxh are
            // always 0 (see above), this reduces to the bare anchor box.
            double bboxx = ax + maxx * 0.1 * aw;
            double bboxy = ay + maxy * 0.1 * ah;
            double bboxw = aw * Math.exp(maxw * 0.2);
            double bboxh = ah * Math.exp(maxh * 0.2);
            // NOTE(review): faceconf is indexed by sorted rank (0..clsnum-1),
            // but faceidx[k] is an anchor index — this likely should be
            // faceconf[k]. boxconf is also never used afterwards.
            float boxconf = faceconf[faceidx[k]];

            // Center/size -> corner coordinates: after these two additions,
            // bboxw/bboxh hold the RIGHT/BOTTOM edges, not width/height.
            bboxx = bboxx - bboxw / 2;
            bboxy = bboxy - bboxh / 2;
            bboxw = bboxw + bboxx;
            bboxh = bboxh + bboxy;

            // Scale normalized coords up to the 640x640 model image.
            PredBoxX[k] = (int) Math.round( bboxx*640 );
            PredBoxY[k] = (int) Math.round( bboxy*640 );
            PredBoxW[k] = (int) Math.round( bboxw*640 );
            PredBoxH[k] = (int) Math.round( bboxh*640 );
            canvas.drawRect(PredBoxX[k], PredBoxY[k],PredBoxW[k], PredBoxH[k], paint);
            // NOTE(review): result[2] halves the right-edge coordinate — the
            // intended meaning (width? center?) is unclear; verify against
            // FaceDrawInfo's expectations.
            result[2] = PredBoxW[k] / 2;
            result[3] = PredBoxH[k];
            result[0] = PredBoxX[k];
            result[1] = PredBoxY[k];
        }

        // Matrix mapping sensor crop-rect coordinates to PreviewView coords.
        Matrix matrix = getMappingMatrix(imageProxy, context.getPreview());

        // Update the face-rectangle UI.
        // NOTE(review): the decoded `result` box is only logged; the overlay
        // is given the full preview rectangle (0, 0, width, height) instead.
        Log.d(TAG, "result: " + Arrays.toString(result));
        FaceDrawInfo faceDrawInfo = new FaceDrawInfo(0, 0, preViewWidth, preViewHeight);
        context.updateFaceRectUI(faceDrawInfo, matrix);
        // Required: close the imageProxy so CameraX can deliver the next frame.
        imageProxy.close();
    }

    /**
     * Builds a Matrix that maps coordinates in the frame's crop rect to
     * coordinates in the PreviewView, compensating for the frame's rotation.
     *
     * The four corners of the crop rect (clockwise) are mapped onto the four
     * corners of the view; the destination corners are cyclically shifted by
     * one vertex per 90° of required rotation before the mapping is computed.
     *
     * @param imageProxy  source frame; supplies the crop rect and rotation
     * @param previewView target view; supplies the destination dimensions
     * @return a Matrix usable to transform analysis coordinates to view space
     */
    Matrix getMappingMatrix(ImageProxy imageProxy, PreviewView previewView) {
        Rect cropRect = imageProxy.getCropRect();
        int rotationDegrees = imageProxy.getImageInfo().getRotationDegrees();
        Matrix matrix = new Matrix();

        // A float array of the source vertices (crop rect) in clockwise order.
        float[] source = {
                cropRect.left,
                cropRect.top,
                cropRect.right,
                cropRect.top,
                cropRect.right,
                cropRect.bottom,
                cropRect.left,
                cropRect.bottom
        };

        // A float array of the destination vertices in clockwise order.
        float[] destination = {
                0f,
                0f,
                previewView.getWidth(),
                0f,
                previewView.getWidth(),
                previewView.getHeight(),
                0f,
                previewView.getHeight()
        };

        // The destination vertexes need to be shifted based on rotation degrees.
        // The rotation degree represents the clockwise rotation needed to correct
        // the image.

        // Each vertex is represented by 2 float numbers in the vertices array.
        int vertexSize = 2;
        // The destination needs to be shifted 1 vertex for every 90° rotation.
        int shiftOffset = rotationDegrees / 90 * vertexSize;
        float[] tempArray = destination.clone();
        for (int toIndex = 0; toIndex < source.length; toIndex++) {
            int fromIndex = (toIndex + shiftOffset) % source.length;
            destination[toIndex] = tempArray[fromIndex];
        }
        matrix.setPolyToPoly(source, 0, destination, 0, 4);
        return matrix;
    }


}
