package com.lhkk.inventoryvehicle.ai // Replace with your actual package name

import android.content.Context
import android.graphics.RectF // Import RectF for bounding box operations
import androidx.camera.core.ImageAnalysis
import androidx.camera.core.ImageProxy
import androidx.camera.view.PreviewView
import org.tensorflow.lite.support.image.ImageProcessor
import org.tensorflow.lite.support.image.TensorImage
import org.tensorflow.lite.support.image.ops.Rot90Op
import org.tensorflow.lite.task.core.BaseOptions
import org.tensorflow.lite.task.vision.detector.Detection
import org.tensorflow.lite.task.vision.detector.ObjectDetector
import java.util.concurrent.atomic.AtomicInteger

// Assuming these are needed for ToastUtil and handler as per previous context
import com.cj.mvvmlib.util.ToastUtil // Import ToastUtil for showing toast messages
import com.lhkk.inventoryvehicle.common.LogFileUtils
import com.lhkk.inventoryvehicle.http.handler // Import handler for UI thread operations

// Lifecycle states a tracked vehicle moves through as it crosses the screen.
enum class VehicleState {
    NEW,         // Freshly detected; has not crossed any trigger line yet
    APPROACHING, // Crossed the first line; front-of-vehicle (车头) capture stage
    CENTERED,    // Crossed the second line; vehicle-body (车身) capture stage
    DONE         // Passed the exit threshold; rear-of-vehicle (车尾) stage complete
}

/** Process-wide source of unique, monotonically increasing vehicle IDs (thread-safe). */
object VehicleIdProvider {
    private val nextId = AtomicInteger(0)

    /** Returns the next unique vehicle ID: 1, 2, 3, ... */
    fun getNextId(): Int = nextId.incrementAndGet()
}

/**
 * Per-vehicle tracking record.
 *
 * Holds the most recent [Detection], the current [VehicleState], and the last known
 * center-X coordinate (used to derive movement direction between frames).
 * [stateTransitionCandidateCounter] debounces state transitions (hysteresis), and
 * [lostFramesCounter] counts consecutive frames without a match so brief occlusions
 * do not kill the track. The three boolean flags guarantee that each capture stage
 * triggers at most one photo.
 */
data class TrackedVehicleInfo(
    var detection: Detection,
    var state: VehicleState,
    var lastDetectionX: Float,
    var stateTransitionCandidateCounter: Int = 0,
    var lostFramesCounter: Int = 0,
    var hasTakenApproachingPhoto: Boolean = false,
    var hasTakenCenteredPhoto: Boolean = false,
    var hasTakenDonePhoto: Boolean = false
)

/**
 * Analyzes image frames from CameraX, performs object detection, and tracks vehicle states.
 * This analyzer handles vehicles moving ONLY Left-to-Right (entering from the left edge,
 * exiting on the right edge).
 *
 * @param context The application context (used to load the TFLite model from assets).
 * @param previewView The PreviewView displaying the camera feed (nullable for headless use).
 * @param takePictureCallback Invoked on the analyzer thread when a photo should be taken;
 * receives the name of the state that triggered the capture.
 * @param listener Per-frame update: (status string, filtered detections, image width, image height).
 */
class VehicleAnalyzerLeftToRight(
    private val context: Context,
    private val previewView: PreviewView?,
    private val takePictureCallback: (stateName: String) -> Unit,
    private val listener: (status: String, detections: List<Detection>, w: Int, h: Int) -> Unit) :
    ImageAnalysis.Analyzer {

    private val objectDetector: ObjectDetector

    // Live tracks, keyed by the stable ID handed out by VehicleIdProvider.
    private var trackedVehicles = mutableMapOf<Int, TrackedVehicleInfo>()

    // --- Normalized trigger positions for Left-to-Right (entering) movement ---
    private val LR_LINE_A_X_NORMALIZED = 0.20f   // Line A: 20% from the left edge
    private val LR_LINE_B_X_NORMALIZED = 0.50f   // Line B: 50% from the left (center)
    private val LR_LEAVING_THRESHOLD_RIGHT_NORMALIZED =
        0.95f // Exit: box's right edge past 95% of the frame width

    // A candidate transition must hold for this many consecutive frames before committing.
    private val FRAME_CONFIRMATION_THRESHOLD = 4
    // Minimum normalized per-frame center-X delta that counts as real movement.
    private val MIN_MOVEMENT_THRESHOLD_NORMALIZED = 0.005f

    // A track is dropped after this many consecutive frames without a matching detection.
    private val MAX_LOST_FRAMES = 10
    // Minimum IoU for a detection to be considered the same vehicle as an existing track.
    private val IOU_THRESHOLD = 0.3f

    // Minimum detection-box area as a fraction of the whole image (filters tiny/far objects).
    // E.g. 0.01f means the box must cover at least 1% of the image area.
    private val MIN_DETECTION_AREA_NORMALIZED = 0.01f

    init {
        val baseOptions = BaseOptions.builder().build()
        val options = ObjectDetector.ObjectDetectorOptions.builder().setBaseOptions(baseOptions)
            .setMaxResults(5).setScoreThreshold(0.6f).build()
        objectDetector =
            ObjectDetector.createFromFileAndOptions(context, "ssd_mobilenet_v1.tflite", options)
    }

    @androidx.annotation.OptIn(androidx.camera.core.ExperimentalGetImage::class)
    override fun analyze(imageProxy: ImageProxy) {
        // close() MUST run even if detection throws; otherwise the frame is never released
        // and the CameraX analysis pipeline stalls (it recycles a fixed pool of buffers).
        try {
            val bitmap = imageProxy.toBitmap()

            // Undo the sensor rotation so detection runs on an upright image.
            val imageProcessor =
                ImageProcessor.Builder().add(Rot90Op(-imageProxy.imageInfo.rotationDegrees / 90))
                    .build()
            val tensorImage = imageProcessor.process(TensorImage.fromBitmap(bitmap))

            val results = objectDetector.detect(tensorImage)

            // Keep only "car"/"truck" detections whose box covers a minimum share of the frame.
            // firstOrNull() guards against a detection with an empty category list.
            val imageArea = (tensorImage.width * tensorImage.height).toFloat()
            val vehicleDetections = results.filter { detection ->
                val label = detection.categories.firstOrNull()?.label
                val detectionArea =
                    detection.boundingBox.width() * detection.boundingBox.height()
                label in listOf("car", "truck") &&
                        (detectionArea / imageArea) > MIN_DETECTION_AREA_NORMALIZED
            }

            updateTrackedVehicles(vehicleDetections, tensorImage.width)

            // Report each still-alive track by its stable map-key ID. (Previously this used
            // detection.hashCode(), which changes every frame because the Detection object
            // is replaced on each successful match.)
            val status = trackedVehicles.entries
                .filter { it.value.lostFramesCounter < MAX_LOST_FRAMES }
                .joinToString("\n") { (id, trackedInfo) ->
                    "车辆ID ${id}: ${trackedInfo.state} (从左到右)"
                }

            listener(status, vehicleDetections, tensorImage.width, tensorImage.height)
        } finally {
            imageProxy.close()
        }
    }

    /**
     * Updates the tracked-vehicle map against the current frame's detections.
     * This analyzer assumes all vehicles move Left-to-Right.
     * Matching uses IoU so multiple simultaneous vehicles stay distinct; unmatched
     * detections start new tracks, and tracks lost for too long are dropped.
     */
    private fun updateTrackedVehicles(detections: List<Detection>, imageWidth: Int) {
        val updatedTrackedVehiclesMap = mutableMapOf<Int, TrackedVehicleInfo>()
        val unmatchedDetections = detections.toMutableList()

        // Phase 1: greedily match each existing track to the detection with the highest
        // IoU above the threshold.
        for ((id, trackedInfo) in trackedVehicles) {
            var bestMatchDetection: Detection? = null
            var maxIoU = 0f

            for (currentDetection in unmatchedDetections) {
                val iou =
                    calculateIoU(trackedInfo.detection.boundingBox, currentDetection.boundingBox)
                if (iou > IOU_THRESHOLD && iou > maxIoU) {
                    maxIoU = iou
                    bestMatchDetection = currentDetection
                }
            }

            if (bestMatchDetection != null) {
                // Match found: refresh the track and claim the detection so no other
                // track can reuse it this frame.
                trackedInfo.detection = bestMatchDetection
                trackedInfo.lostFramesCounter = 0
                unmatchedDetections.remove(bestMatchDetection)

                // DONE vehicles keep their final state; everything else may advance.
                if (trackedInfo.state != VehicleState.DONE) {
                    determineState(trackedInfo, imageWidth)
                }
                updatedTrackedVehiclesMap[id] = trackedInfo
            } else {
                // No match this frame: keep the track alive until the lost-frame budget
                // runs out, then drop it entirely.
                trackedInfo.lostFramesCounter++
                if (trackedInfo.lostFramesCounter < MAX_LOST_FRAMES) {
                    updatedTrackedVehiclesMap[id] = trackedInfo
                } else {
                    LogFileUtils.writeTxtToFile("车辆 ${id} 丢失跟踪并已移除。")
                }
            }
        }

        // Phase 2: any detection still unclaimed starts a brand-new track.
        unmatchedDetections.forEach { newDetection ->
            val newVehicleId = VehicleIdProvider.getNextId()
            updatedTrackedVehiclesMap[newVehicleId] = TrackedVehicleInfo(detection = newDetection,
                state = VehicleState.NEW,
                lastDetectionX = newDetection.boundingBox.centerX())
        }

        trackedVehicles = updatedTrackedVehiclesMap
    }

    /**
     * Calculates the Intersection over Union (IoU) of two bounding boxes.
     *
     * @param box1 The first bounding box.
     * @param box2 The second bounding box.
     * @return The IoU value in [0.0, 1.0]; 0 when the boxes do not overlap.
     */
    private fun calculateIoU(box1: RectF, box2: RectF): Float {
        // Intersection rectangle of the two boxes.
        val intersection = RectF(maxOf(box1.left, box2.left),
            maxOf(box1.top, box2.top),
            minOf(box1.right, box2.right),
            minOf(box1.bottom, box2.bottom))

        // Non-positive width/height means the boxes do not overlap.
        if (intersection.width() <= 0 || intersection.height() <= 0) {
            return 0f
        }

        val intersectionArea = intersection.width() * intersection.height()
        val box1Area = box1.width() * box1.height()
        val box2Area = box2.width() * box2.height()

        // Union = Area1 + Area2 - IntersectionArea
        val unionArea = box1Area + box2Area - intersectionArea

        // Guard against division by zero (both boxes degenerate).
        if (unionArea == 0f) {
            return 0f
        }

        return intersectionArea / unionArea
    }

    /**
     * Determines and updates the next state of a vehicle assumed to be moving Left-to-Right,
     * and triggers the photo-capture callback on confirmed transitions.
     *
     * A candidate transition must persist for [FRAME_CONFIRMATION_THRESHOLD] consecutive
     * frames (hysteresis) before being committed, and each capture stage fires at most once
     * per vehicle (guarded by the hasTaken* flags).
     */
    private fun determineState(trackedVehicleInfo: TrackedVehicleInfo, imageWidth: Int) {
        val currentDetection = trackedVehicleInfo.detection
        val currentState = trackedVehicleInfo.state

        if (currentState == VehicleState.DONE) return

        val centerX_normalized = currentDetection.boundingBox.centerX() / imageWidth
        val rightX_normalized = currentDetection.boundingBox.right / imageWidth
        val currentDetectionX = currentDetection.boundingBox.centerX()

        // A significantly positive delta means the vehicle is moving to the right.
        val deltaX_normalized =
            (currentDetectionX - trackedVehicleInfo.lastDetectionX) / imageWidth
        val isMovingRight = deltaX_normalized > MIN_MOVEMENT_THRESHOLD_NORMALIZED

        var potentialNextState = currentState

        when (currentState) {
            // Center crossed line A (first line from the left) while moving right -> APPROACHING.
            VehicleState.NEW -> {
                if (centerX_normalized > LR_LINE_A_X_NORMALIZED && isMovingRight) {
                    potentialNextState = VehicleState.APPROACHING
                }
            }
            // Center crossed line B (the center line) while moving right -> CENTERED.
            VehicleState.APPROACHING -> {
                if (centerX_normalized > LR_LINE_B_X_NORMALIZED && isMovingRight) {
                    potentialNextState = VehicleState.CENTERED
                }
            }
            // Right edge crossed the exit threshold while moving right -> DONE.
            VehicleState.CENTERED -> {
                if (rightX_normalized > LR_LEAVING_THRESHOLD_RIGHT_NORMALIZED && isMovingRight) {
                    potentialNextState = VehicleState.DONE
                }
            }
            else -> {} // DONE is unreachable here due to the early return above.
        }

        // Hysteresis: require FRAME_CONFIRMATION_THRESHOLD consecutive candidate frames
        // before committing the state transition.
        if (potentialNextState != currentState) {
            trackedVehicleInfo.stateTransitionCandidateCounter++
            if (trackedVehicleInfo.stateTransitionCandidateCounter >= FRAME_CONFIRMATION_THRESHOLD) {
                trackedVehicleInfo.state = potentialNextState
                trackedVehicleInfo.stateTransitionCandidateCounter = 0

                // Per-stage flags guarantee at most one photo per state.
                when (trackedVehicleInfo.state) {
                    VehicleState.APPROACHING -> {
                        if (!trackedVehicleInfo.hasTakenApproachingPhoto) {
                            trackedVehicleInfo.hasTakenApproachingPhoto = true
                            handler.post { ToastUtil.showShortToast("车头 (从左到右)") }
                            takePictureCallback.invoke(trackedVehicleInfo.state.name)
                            LogFileUtils.writeTxtToFile("📸 触发拍照：车辆驶来 (车头) - 从左到右")
                        }
                    }
                    VehicleState.CENTERED -> {
                        if (!trackedVehicleInfo.hasTakenCenteredPhoto) {
                            trackedVehicleInfo.hasTakenCenteredPhoto = true
                            handler.post { ToastUtil.showShortToast("车身 (从左到右)") }
                            takePictureCallback.invoke(trackedVehicleInfo.state.name)
                            LogFileUtils.writeTxtToFile("📸 触发拍照：车辆靠近 (车身) - 从左到右")
                        }
                    }
                    VehicleState.DONE -> {
                        if (!trackedVehicleInfo.hasTakenDonePhoto) {
                            trackedVehicleInfo.hasTakenDonePhoto = true
                            handler.post { ToastUtil.showShortToast("车尾 (从左到右)") }
                            takePictureCallback.invoke(trackedVehicleInfo.state.name)
                            LogFileUtils.writeTxtToFile("📸 触发拍照：车辆驶离 (车尾) - 从左到右")
                        }
                    }
                    else -> {}
                }
            }
        } else {
            // Reset the confirmation counter only when the vehicle has visibly regressed:
            // moved significantly left, or slipped back past its current state's entry line.
            val movedSignificantlyLeft = deltaX_normalized < -MIN_MOVEMENT_THRESHOLD_NORMALIZED
            val hasRegressedPastCurrentStateEntry = when (currentState) {
                VehicleState.NEW -> false
                VehicleState.APPROACHING -> centerX_normalized < LR_LINE_A_X_NORMALIZED || movedSignificantlyLeft
                VehicleState.CENTERED -> centerX_normalized < LR_LINE_B_X_NORMALIZED || movedSignificantlyLeft
                VehicleState.DONE -> false
            }

            if (hasRegressedPastCurrentStateEntry) {
                trackedVehicleInfo.stateTransitionCandidateCounter = 0
            }
        }
        trackedVehicleInfo.lastDetectionX = currentDetectionX
    }

    /**
     * Resets the tracking data, clearing all tracked vehicles.
     */
    fun reset() {
        trackedVehicles.clear()
    }
}