package com.newlink.building.userinfo.helper

//import com.google.firebase.ml.vision.face.FirebaseVisionFaceDetector
import android.graphics.Bitmap
import android.graphics.BitmapFactory
import android.graphics.Canvas
import android.graphics.Color
import android.graphics.ImageFormat
import android.graphics.Matrix
import android.graphics.Paint
import android.graphics.Rect
import android.graphics.RectF
import android.graphics.YuvImage
import android.media.Image
import android.util.Base64
import android.util.Log
import com.google.android.gms.tasks.OnCompleteListener
import com.google.android.gms.tasks.OnFailureListener
import com.google.android.gms.tasks.OnSuccessListener
import com.google.android.gms.tasks.Task
import com.google.mlkit.vision.common.InputImage
import com.google.mlkit.vision.face.Face
import com.google.mlkit.vision.face.FaceDetection
import com.google.mlkit.vision.face.FaceDetector
import com.google.mlkit.vision.face.FaceDetectorOptions
import com.newlink.building.common_base.utils.NLog
import java.io.ByteArrayOutputStream
import java.nio.ReadOnlyBufferException
import kotlin.experimental.inv


/**
 * @Author: Jake
 * @Date: 2023-12-19
 * @Description:
 */
/**
 * @Author: Jake
 * @Date: 2023-12-19
 * @Description: ML Kit based face-detection helper. Detects the face closest to the
 * image center, crops it out of the frame, scales it to 112x112 (the input size the
 * recognition model expects) and returns it as a Base64-encoded JPEG via callback.
 */
object Module_UserInfo_FaceDetectUtil {

    const val TAG: String = "authface"

    // ML Kit detector, created once when this object is first loaded.
    // Kept as a public var for source compatibility with existing callers.
    var mDetector: FaceDetector? = null

    init {
        initGoogleMkSdk()
    }

    /** Builds the ML Kit face detector in accurate mode with landmarks and classification enabled. */
    private fun initGoogleMkSdk() {
        val options = FaceDetectorOptions.Builder()
            .setPerformanceMode(FaceDetectorOptions.PERFORMANCE_MODE_ACCURATE)
            .setLandmarkMode(FaceDetectorOptions.LANDMARK_MODE_ALL)
            .setClassificationMode(FaceDetectorOptions.CLASSIFICATION_MODE_ALL)
            .build()
        mDetector = FaceDetection.getClient(options)
    }

    /**
     * Runs asynchronous face detection on [bitmap].
     *
     * On success the center-most face is cropped, mirrored, resized to 112x112 and
     * delivered to [callback] as a Base64 JPEG string. On every failure path
     * (detector missing, no face found, crop/scale error, ML Kit failure) the
     * callback is invoked with "" exactly once — the original code dropped the
     * callback entirely when a face list was non-empty but no face was selected.
     *
     * NOTE(review): the crop path recycles intermediate bitmaps and may recycle
     * [bitmap] itself (getCropBitmapByCPU recycles its source, which can alias the
     * input for an identity rotation) — callers must not reuse the bitmap
     * afterwards; confirm against call sites.
     */
    fun detectFace(bitmap: Bitmap, callback: (String) -> Unit) {
        val detector = mDetector
        if (detector == null) {
            // Fixed: previously `mDetector?.process(image)!!` would NPE here.
            Log.e(TAG, "detectFace called with no detector initialized")
            callback("")
            return
        }
        val image = InputImage.fromBitmap(bitmap, 0)
        NLog.e("detectFace bitmap is ${bitmap.byteCount}")
        NLog.e("detectFace image is ${image.width} , ${image.height}")
        detector.process(image)
            .addOnSuccessListener { faces: List<Face> ->
                val face = if (faces.isEmpty()) {
                    null
                } else {
                    getCenterMostFace(faces, image.width, image.height)
                }
                if (face == null) {
                    // No usable face — always report back (bug fix: the old code
                    // silently skipped the callback in the non-empty/null-face case).
                    callback("")
                    return@addOnSuccessListener
                }
                try {
                    // Frame is already upright (rotation 0, no mirror).
                    val frameBmp = rotateBitmap(bitmap, 0, false)

                    // Crop the detected face's bounding box out of the full frame.
                    val boundingBox = RectF(face.boundingBox)
                    val cropped = getCropBitmapByCPU(frameBmp, boundingBox)
                    val mirrored = if (cropped != null) rotateBitmap(cropped, 0, true) else null
                    if (mirrored == null) {
                        callback("")
                        return@addOnSuccessListener
                    }

                    // 112x112 is the input size required by the recognition model.
                    val scaled: Bitmap = getResizedBitmap(mirrored, 112, 112)
                    Log.e(TAG, " ======= 识别到人脸信息 ==========")
                    callback(byte2Base64(bitmap2Byte(scaled)) ?: "")
                } catch (e: Exception) {
                    // Degrade any bitmap failure to "no face" instead of crashing the listener.
                    Log.e(TAG, "detectFace crop/scale failed", e)
                    callback("")
                }
            }
            .addOnFailureListener { _: Exception? ->
                Log.e(TAG, "<jake> ======= 未识别到人脸信息")
                callback("")
            }
            .addOnCompleteListener {
                Log.e(TAG, "<jake> ======= 识别完毕")
            }
    }

    /**
     * Returns the detected face whose bounding-box center is nearest (Euclidean
     * distance) to the image center, or null when [faces] is empty.
     */
    private fun getCenterMostFace(faces: List<Face>, imageWidth: Int, imageHeight: Int): Face? {
        val imageCenterX = imageWidth / 2.0
        val imageCenterY = imageHeight / 2.0
        return faces.minByOrNull { face ->
            val box = face.boundingBox
            Math.hypot(box.centerX() - imageCenterX, box.centerY() - imageCenterY)
        }
    }

    /**
     * Rotates [bitmap] by [rotationDegrees] and optionally mirrors it along the X
     * axis (front-camera correction). Recycles the input when a distinct bitmap was
     * allocated. The nullable return type is kept for source compatibility;
     * createBitmap itself never returns null.
     */
    fun rotateBitmap(bitmap: Bitmap, rotationDegrees: Int, flipX: Boolean): Bitmap? {
        val matrix = Matrix()
        matrix.postRotate(rotationDegrees.toFloat())
        matrix.postScale(if (flipX) -1.0f else 1.0f, 1.0f)
        val rotated = Bitmap.createBitmap(bitmap, 0, 0, bitmap.width, bitmap.height, matrix, true)
        // createBitmap may return the source itself for an identity transform;
        // only recycle when a new bitmap was actually produced.
        if (rotated != bitmap) {
            bitmap.recycle()
        }
        return rotated
    }

    /**
     * Converts a YUV_420_888 [image] into an NV21 byte array.
     * IMPORTANT: if this conversion is not done, the toBitmap conversion does not
     * work on some devices.
     */
    private fun YUV_420_888toNV21(image: Image): ByteArray {
        val width = image.width
        val height = image.height
        val ySize = width * height
        val uvSize = width * height / 4
        val nv21 = ByteArray(ySize + uvSize * 2)
        val yBuffer = image.planes[0].buffer // Y
        val uBuffer = image.planes[1].buffer // U
        val vBuffer = image.planes[2].buffer // V
        var rowStride = image.planes[0].rowStride
        assert(image.planes[0].pixelStride == 1)
        var pos = 0
        if (rowStride == width) { // likely: no row padding, bulk-copy the whole Y plane
            yBuffer.get(nv21, 0, ySize)
            pos += ySize
        } else {
            // Padded rows: copy the Y plane one row at a time, skipping the padding.
            var yBufferPos = -rowStride.toLong() // not an actual position
            while (pos < ySize) {
                yBufferPos += rowStride.toLong()
                yBuffer.position(yBufferPos.toInt())
                yBuffer.get(nv21, pos, width)
                pos += width
            }
        }
        rowStride = image.planes[2].rowStride
        val pixelStride = image.planes[2].pixelStride
        assert(rowStride == image.planes[1].rowStride)
        assert(pixelStride == image.planes[1].pixelStride)
        if (pixelStride == 2 && rowStride == width && uBuffer[0] == vBuffer[1]) {
            // Maybe the V and U planes overlap as per NV21, meaning vBuffer[1] is an
            // alias of uBuffer[0]. Probe by flipping one byte and observing the other.
            val savePixel = vBuffer[1]
            try {
                // Fixed: `savePixel.inv() as Byte` was a redundant cast —
                // kotlin.experimental.inv() already returns Byte.
                vBuffer.put(1, savePixel.inv())
                if (uBuffer[0] == savePixel.inv()) {
                    vBuffer.put(1, savePixel)
                    vBuffer.position(0)
                    uBuffer.position(0)
                    vBuffer.get(nv21, ySize, 1)
                    uBuffer.get(nv21, ySize + 1, uBuffer.remaining())
                    return nv21 // shortcut: planes overlap, bulk-copied directly
                }
            } catch (ex: ReadOnlyBufferException) {
                // Read-only buffers cannot be probed for overlap; fall through.
            }
            // The overlap check failed: restore the probed byte and copy U/V pixel by pixel.
            vBuffer.put(1, savePixel)
        }
        // Interleave V and U in NV21 (VU) order, honoring pixel and row strides.
        for (row in 0 until height / 2) {
            for (col in 0 until width / 2) {
                val vuPos = col * pixelStride + row * rowStride
                nv21[pos++] = vBuffer[vuPos]
                nv21[pos++] = uBuffer[vuPos]
            }
        }
        return nv21
    }

    /** Decodes a camera [image] (YUV_420_888) to a Bitmap via an NV21 -> JPEG round-trip. */
    private fun toBitmap(image: Image): Bitmap? {
        val nv21 = YUV_420_888toNV21(image)
        val yuvImage = YuvImage(nv21, ImageFormat.NV21, image.width, image.height, null)
        val out = ByteArrayOutputStream()
        yuvImage.compressToJpeg(Rect(0, 0, yuvImage.width, yuvImage.height), 75, out)
        val jpegBytes = out.toByteArray()
        return BitmapFactory.decodeByteArray(jpegBytes, 0, jpegBytes.size)
    }

    /**
     * Crops [cropRectF] out of [source] onto a white RGB_565 bitmap and recycles
     * [source]. Returns null when [source] is null (bug fix: the original
     * dereferenced `source!!` before its null check, making the check unreachable).
     */
    fun getCropBitmapByCPU(source: Bitmap?, cropRectF: RectF): Bitmap? {
        if (source == null) return null
        val resultBitmap = Bitmap.createBitmap(
            cropRectF.width().toInt(),
            cropRectF.height().toInt(),
            Bitmap.Config.RGB_565
        )
        val canvas = Canvas(resultBitmap)

        // White background so regions of the box outside the frame are not garbage.
        val paint = Paint(Paint.FILTER_BITMAP_FLAG)
        paint.color = Color.WHITE
        canvas.drawRect(RectF(0f, 0f, cropRectF.width(), cropRectF.height()), paint)

        // Shift the source so the crop rect lands at the canvas origin.
        val matrix = Matrix()
        matrix.postTranslate(-cropRectF.left, -cropRectF.top)
        canvas.drawBitmap(source, matrix, paint)
        if (!source.isRecycled) {
            source.recycle()
        }
        return resultBitmap
    }

    /**
     * Scales [bm] to exactly [newWidth] x [newHeight] and recycles the input.
     * Bug fix: only recycle when createBitmap produced a distinct bitmap — for an
     * identity scale createBitmap may return [bm] itself, and the original code
     * would recycle the very bitmap it was returning (rotateBitmap already guards
     * this; made consistent).
     */
    fun getResizedBitmap(bm: Bitmap, newWidth: Int, newHeight: Int): Bitmap {
        val scaleWidth = newWidth.toFloat() / bm.width
        val scaleHeight = newHeight.toFloat() / bm.height
        val matrix = Matrix()
        matrix.postScale(scaleWidth, scaleHeight)
        val resizedBitmap = Bitmap.createBitmap(bm, 0, 0, bm.width, bm.height, matrix, false)
        if (resizedBitmap != bm) {
            bm.recycle()
        }
        return resizedBitmap
    }

    /**
     * Placeholder for on-device face embedding. The TFLite pre-processing
     * (normalizing 112x112 pixels into a ByteBuffer) was already commented out in
     * the original; this is intentionally a no-op until the model is wired up.
     */
    fun recognizeImage(bitmap: Bitmap) {
        // no-op: embedding model not integrated in this module
    }

    /**
     * Compresses [bitmap] to a JPEG byte array at quality 100 (highest quality).
     * @throws NullPointerException when [bitmap] is null (kept for caller compatibility).
     */
    fun bitmap2Byte(bitmap: Bitmap?): ByteArray? {
        if (bitmap == null) throw NullPointerException()
        val outputStream = ByteArrayOutputStream()
        // Compress the bitmap at 100% (highest) JPEG quality into the output stream.
        bitmap.compress(Bitmap.CompressFormat.JPEG, 100, outputStream)
        return outputStream.toByteArray()
    }

    /** Base64-encodes [imageByte]; null in, null out. */
    fun byte2Base64(imageByte: ByteArray?): String? =
        imageByte?.let { Base64.encodeToString(it, Base64.DEFAULT) }

    /** Encodes [bitmap] as a Base64 PNG string (lossless, unlike the JPEG path in bitmap2Byte). */
    fun bitmapToBase64(bitmap: Bitmap): String {
        val baos = ByteArrayOutputStream()
        bitmap.compress(Bitmap.CompressFormat.PNG, 100, baos)
        return Base64.encodeToString(baos.toByteArray(), Base64.DEFAULT)
    }
}