package com.yricky.atri.utils.yolov5

import android.graphics.Bitmap
import android.graphics.RectF
import android.util.Log
import com.yricky.atri.utils.common.Result
import org.tensorflow.lite.Interpreter
import java.io.File
import java.nio.ByteBuffer
import java.nio.ByteOrder
import java.util.*
import kotlin.collections.ArrayList
import kotlin.math.pow

/**
 * @author Yricky
 * @date 2022/1/8
 */
/**
 * TensorFlow Lite inference session for a YOLOv5 model with a fixed
 * [INPUT_SIZE]x[INPUT_SIZE] float32 input tensor.
 *
 * Thread-safety: the check-then-set on [state] in [recognizeImage] is NOT
 * atomic, so two threads may both pass the busy check; the interpreter
 * invocation itself is serialized via `synchronized(interpreter)`.
 *
 * @param modelFile the `.tflite` model file on disk
 * @param options   interpreter options (thread count, delegates, ...)
 * @param labels    class labels; should match the model's class count
 *
 * @author Yricky
 * @date 2022/1/8
 */
class YoloV5fp16Session(
    private val modelFile: File,
    private val options: Interpreter.Options,
    private val labels: List<String>
) {
    companion object {
        /** Width/height in pixels of the (square) model input. */
        const val INPUT_SIZE = 320

        /** Divisor mapping 8-bit channel values into [0, 1]. */
        const val IMAGE_STD = 255.0f

        /** IoU threshold at or above which overlapping boxes are suppressed. */
        const val mNmsThresh = 0.6f

        /** Minimum (best class score * objectness) for a detection to be kept. */
        const val CONFIDENCE_THRESHOLD = 0.3f

        /**
         * Total candidate boxes the model emits: three anchors per cell over
         * the three YOLOv5 output strides (8, 16 and 32).
         * Computed once; integer arithmetic replaces the original Double.pow.
         */
        val output_box: Int =
            3 * ((INPUT_SIZE / 32) * (INPUT_SIZE / 32) +
                    (INPUT_SIZE / 16) * (INPUT_SIZE / 16) +
                    (INPUT_SIZE / 8) * (INPUT_SIZE / 8))

        /** Bytes per channel value: 4, i.e. float32 I/O buffers. */
        val numBytesPerChannel: Int = 4

        /**
         * Length of the overlap of two 1-D segments given by center (x1, x2)
         * and width (w1, w2); negative when the segments are disjoint.
         */
        fun overlap(x1: Float, w1: Float, x2: Float, w2: Float): Float {
            val left = maxOf(x1 - w1 / 2, x2 - w2 / 2)
            val right = minOf(x1 + w1 / 2, x2 + w2 / 2)
            return right - left
        }

        /** Area of the union of two boxes. */
        fun box_union(a: RectF, b: RectF): Float {
            val inter = box_intersection(a, b)
            return a.width() * a.height() + b.width() * b.height() - inter
        }

        /** Intersection-over-union of two boxes. */
        fun box_iou(a: RectF, b: RectF): Float =
            box_intersection(a, b) / box_union(a, b)

        /** Area of the intersection of two boxes; 0 when they are disjoint. */
        fun box_intersection(a: RectF, b: RectF): Float {
            val w = overlap(a.centerX(), a.width(), b.centerX(), b.width())
            val h = overlap(a.centerY(), a.height(), b.centerY(), b.height())
            return if (w < 0 || h < 0) 0f else w * h
        }

        /** Session lifecycle: IDLE -> BUSY (per inference), TERM after [finish]. */
        sealed class State {
            object TERM : State()
            object IDLE : State()
            object BUSY : State()
        }
    }

    /** Current lifecycle state; writes are not synchronized (see class doc). */
    var state: State = State.IDLE
        private set

    val isBusy get() = state is State.BUSY

    /** Class count derived from the output tensor: last dim minus 5 box fields. */
    private val numClass by lazy {
        interpreter.getOutputTensor(0).shape().let { it[it.size - 1] - 5 }
    }

    private val interpreter: Interpreter by lazy {
        Interpreter(modelFile, options)
    }

    /** Reused direct input buffer: INPUT_SIZE * INPUT_SIZE RGB float32. */
    private val imgData: ByteBuffer by lazy {
        ByteBuffer.allocateDirect(INPUT_SIZE * INPUT_SIZE * 3 * numBytesPerChannel)
            .also { it.order(ByteOrder.nativeOrder()) }
    }

    /** Reused direct output buffer: output_box rows of (numClass + 5) floats. */
    private val outData: ByteBuffer by lazy {
        ByteBuffer.allocateDirect(output_box * (numClass + 5) * numBytesPerChannel)
            .also { it.order(ByteOrder.nativeOrder()) }
    }

    /** Scratch ARGB pixel buffer for [convertBitmapToByteBuffer]. */
    private val intValues: IntArray by lazy {
        IntArray(INPUT_SIZE * INPUT_SIZE)
    }

    /**
     * Fills [imgData] with the bitmap's pixels as normalized RGB floats.
     *
     * The bitmap must be exactly INPUT_SIZE x INPUT_SIZE: `getPixels` copies
     * with the bitmap's own row stride, while the read loop assumes an
     * INPUT_SIZE stride — any other size would overflow [intValues] or read
     * garbage, so we fail fast with a clear message instead.
     *
     * @throws IllegalArgumentException if the bitmap is not INPUT_SIZE square
     */
    private fun convertBitmapToByteBuffer(bitmap: Bitmap): ByteBuffer {
        require(bitmap.width == INPUT_SIZE && bitmap.height == INPUT_SIZE) {
            "Expected ${INPUT_SIZE}x$INPUT_SIZE input, got ${bitmap.width}x${bitmap.height}"
        }
        bitmap.getPixels(intValues, 0, bitmap.width, 0, 0, bitmap.width, bitmap.height)
        imgData.rewind()
        // Row-major traversal; stride equals INPUT_SIZE thanks to the require above.
        for (pixelValue in intValues) {
            // ARGB int -> normalized R, G, B floats (alpha is dropped).
            imgData.putFloat(((pixelValue shr 16) and 0xFF) / IMAGE_STD)
            imgData.putFloat(((pixelValue shr 8) and 0xFF) / IMAGE_STD)
            imgData.putFloat((pixelValue and 0xFF) / IMAGE_STD)
        }
        return imgData
    }

    /**
     * Greedy per-class non-maximum suppression: repeatedly keep the most
     * confident remaining box of each class and drop boxes that overlap it
     * with IoU >= [mNmsThresh].
     *
     * Replaces the original PriorityQueue drain/refill (which copied the heap
     * to an array on every round) with a single descending sort per class.
     */
    private fun nms(list: ArrayList<Recognition>): List<Recognition> {
        val kept = ArrayList<Recognition>()
        for (classIndex in labels.indices) {
            var candidates = list
                .filter { it.detectedClass == classIndex }
                .sortedByDescending { it.confidence }
            while (candidates.isNotEmpty()) {
                val best = candidates.first()
                kept.add(best)
                // Survivors are boxes that do not overlap the chosen one too much.
                candidates = candidates.drop(1)
                    .filter { box_iou(best.location, it.location) < mNmsThresh }
            }
        }
        return kept
    }

    /**
     * Runs one inference on [bitmap] (must be INPUT_SIZE x INPUT_SIZE).
     *
     * Returns an empty result immediately when the session is busy or has
     * been terminated via [finish]. The BUSY flag is now released in a
     * finally block, so an exception mid-inference no longer leaves the
     * session stuck in BUSY forever.
     *
     * @return interpreter/total timings plus NMS-filtered detections in
     *         input-pixel coordinates
     */
    fun recognizeImage(bitmap: Bitmap): Result<List<Recognition>> {
        // NOTE(review): this check-then-set is not atomic; two threads can
        // both pass it. The interpreter call itself is still serialized.
        if (state is State.TERM || state is State.BUSY)
            return Result(0, 0, emptyList())
        state = State.BUSY
        try {
            val totalStartTime = System.currentTimeMillis()
            convertBitmapToByteBuffer(bitmap)
            outData.rewind()
            val startTime = System.currentTimeMillis()
            synchronized(interpreter) {
                interpreter.run(imgData, outData)
            }
            val endTime = System.currentTimeMillis()
            outData.rewind()

            // Decode the flat output into rows of
            // (x, y, w, h, objectness, classScores...), scaling the four box
            // fields from normalized units back to input pixels.
            val rows = Array(output_box) { FloatArray(numClass + 5) }
            for (row in rows) {
                for (j in row.indices) row[j] = outData.float
                for (j in 0..3) row[j] *= INPUT_SIZE
            }

            val detections = ArrayList<Recognition>()
            // Guard against a label list longer than the model's class count,
            // which previously indexed past the end of the output row.
            val classCount = minOf(labels.size, numClass)
            for (row in rows) {
                val objectness = row[4]
                var detectedClass = -1
                var maxClass = 0f
                for (c in 0 until classCount) {
                    val score = row[5 + c]
                    if (score > maxClass) {
                        detectedClass = c
                        maxClass = score
                    }
                }
                val confidenceInClass = maxClass * objectness
                // confidenceInClass > 0 implies detectedClass >= 0, so the
                // labels[detectedClass] lookup below is safe.
                if (confidenceInClass > CONFIDENCE_THRESHOLD) {
                    val xPos = row[0]
                    val yPos = row[1]
                    val w = row[2]
                    val h = row[3]
                    Log.d(
                        "YoloV5Classifier",
                        "$xPos,$yPos,$w,$h"
                    )
                    // Center/size -> corners, clamped to the bitmap bounds.
                    val rect = RectF(
                        0f.coerceAtLeast(xPos - w / 2),
                        0f.coerceAtLeast(yPos - h / 2),
                        (bitmap.width - 1).toFloat().coerceAtMost(xPos + w / 2),
                        (bitmap.height - 1).toFloat().coerceAtMost(yPos + h / 2)
                    )
                    detections.add(
                        Recognition(
                            "0", // id was always "" + 0 in the original
                            labels[detectedClass],
                            confidenceInClass,
                            rect,
                            detectedClass
                        )
                    )
                }
            }
            val totalEndTime = System.currentTimeMillis()
            return Result(
                interpreterMs = endTime - startTime,
                totalMs = totalEndTime - totalStartTime,
                result = nms(detections)
            )
        } finally {
            // Restore IDLE unless finish() flipped us to TERM mid-run.
            if (state is State.BUSY)
                state = State.IDLE
        }
    }

    /**
     * Marks the session terminated; subsequent [recognizeImage] calls return
     * an empty result.
     *
     * NOTE(review): the interpreter is deliberately not closed here — closing
     * while another thread is mid-inference would crash the process. Native
     * resources are leaked until GC; confirm whether a synchronized close
     * (under `synchronized(interpreter)`) is wanted instead.
     */
    fun finish() {
        state = State.TERM
//        interpreter.close()
    }
}