package com.arvin.faceDetect.ui

import android.Manifest
import android.content.Context
import android.content.SharedPreferences
import android.content.pm.PackageManager
import android.graphics.Bitmap
import android.graphics.BitmapFactory
import android.graphics.Matrix
import android.media.Image
import android.os.Bundle
import android.view.Menu
import android.view.MenuItem
import android.view.View
import android.view.ViewGroup
import android.widget.Toast
import androidx.appcompat.app.AppCompatActivity
import androidx.camera.core.CameraSelector
import androidx.camera.core.ImageAnalysis
import androidx.camera.core.ImageCapture
import androidx.camera.core.ImageProxy
import androidx.camera.core.Preview
import androidx.camera.lifecycle.ProcessCameraProvider
import androidx.core.app.ActivityCompat
import androidx.core.content.ContextCompat
import androidx.lifecycle.lifecycleScope
import androidx.recyclerview.widget.DiffUtil
import androidx.recyclerview.widget.ListAdapter
import androidx.recyclerview.widget.RecyclerView
import com.arvin.faceDetect.R
import com.arvin.faceDetect.databinding.ActivityFaceAutoCollectBinding
import com.arvin.faceDetect.db.FaceDatabase
import com.arvin.faceDetect.db.FaceEntity
import com.arvin.faceDetect.ml.FaceFeatureExtractor
import com.arvin.faceDetect.model.CameraSettings
import com.arvin.faceDetect.utils.ImagePreprocessor
import com.arvin.faceDetect.utils.LogUtils
import com.arvin.faceDetect.utils.Settings
import com.google.mlkit.vision.common.InputImage
import com.google.mlkit.vision.face.Face
import com.google.mlkit.vision.face.FaceDetection
import com.google.mlkit.vision.face.FaceDetectorOptions
import com.google.mlkit.vision.face.FaceLandmark
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.NonCancellable
import kotlinx.coroutines.channels.Channel
import kotlinx.coroutines.channels.consumeEach
import kotlinx.coroutines.delay
import kotlinx.coroutines.launch
import kotlinx.coroutines.withContext
import java.text.SimpleDateFormat
import java.util.Date
import java.util.Locale
import java.util.UUID
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
import java.util.concurrent.atomic.AtomicBoolean
import kotlin.math.abs
import kotlin.math.sqrt

class FaceAutoCollectActivity : AppCompatActivity(), 
    SharedPreferences.OnSharedPreferenceChangeListener {
    // ViewBinding held in a nullable backing field; `binding` asserts non-null between
    // onCreate and onDestroy.
    private var _binding: ActivityFaceAutoCollectBinding? = null
    private val binding get() = _binding!!
    // Single-thread executor that runs the CameraX image analyzer callbacks.
    private lateinit var cameraExecutor: ExecutorService
    // Embedding extractor used to turn a cropped face bitmap into a feature vector.
    private lateinit var faceFeatureExtractor: FaceFeatureExtractor
    // Settings store; this activity registers itself as a change listener.
    private lateinit var preferences: SharedPreferences
    private var imageCapture: ImageCapture? = null
    // Face image pending manual save (enables the save button when non-null).
    private var currentFaceBitmap: Bitmap? = null
    // Features matching currentFaceBitmap, pending manual save.
    private var faceFeatures: FloatArray? = null
    // Queue feeding detection results to a sequential consumer coroutine.
    private var detectionQueue: Channel<Pair<List<Face>, Bitmap>>? = null
    // Guards against concurrent feature-extraction work.
    private val isProcessing = AtomicBoolean(false)
    private var lastProcessTime = 0L
    // Feature vectors accumulated during one auto-collection run.
    private val collectedFeatures = mutableListOf<FloatArray>()
    // Most recent valid face/frame, used to build the final saved image.
    private var latestFace: Face? = null
    private var latestBitmap: Bitmap? = null
    // True during the post-collection cooldown window.
    private var isInCooldown = false
    private var lastCollectionTime = 0L
    // Per-frame re-entrancy guard for the analyzer success callback.
    private var isProcessingFrame = false
    private var cameraFacing: Int = CameraSettings.CAMERA_FACING_FRONT
    private var isFrontCamera: Boolean = true
    // Auto vs. manual collection mode toggle.
    private var isAutoCollectMode = true
    // Manual mode: true once a face is captured and we await the user's name/save.
    private var isWaitingForConfirmation = false
    private var collectionCount = 0
    private var isCollecting = false
    // Collection parameters, snapshotted from Settings and refreshed on change.
    private var requiredFrames: Int = 0
    private var collectionThreshold: Float = 0f
    private var collectionInterval: Int = 0

    // ML Kit detector configured lazily from the current collection settings.
    // NOTE(review): settings read here are captured once at first use; later setting
    // changes do not rebuild the detector — confirm that is intended.
    private val faceDetector by lazy {
        val options = FaceDetectorOptions.Builder()
            .setPerformanceMode(Settings.getCollectionPerformanceMode(this))
            .setLandmarkMode(FaceDetectorOptions.LANDMARK_MODE_ALL)
            .setClassificationMode(FaceDetectorOptions.CLASSIFICATION_MODE_ALL)
            .setMinFaceSize(Settings.getCollectionMinFaceSize(this))
            .setContourMode(FaceDetectorOptions.CONTOUR_MODE_ALL)
            .enableTracking()
            .build()
        FaceDetection.getClient(options)
    }

    // Adapter and backing list (bitmap + timestamp) for the recent-faces strip.
    private lateinit var collectedFacesAdapter: CollectedFacesAdapter
    private val recentCollectedFaces = mutableListOf<Pair<Bitmap, Long>>()

    /**
     * Sets up binding, settings listeners, UI, the detection channel, and the camera.
     *
     * Fix: [cameraExecutor] and [faceFeatureExtractor] are now initialized BEFORE
     * [startCamera] can be invoked. Previously they were assigned after the permission
     * branch; [startCamera] captures [cameraExecutor] for the analyzer, and relying on
     * the camera provider listener firing later risked an
     * UninitializedPropertyAccessException.
     */
    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        _binding = ActivityFaceAutoCollectBinding.inflate(layoutInflater)
        setContentView(binding.root)

        // Shared settings store; listen for live preference changes.
        preferences = getSharedPreferences("face_detect_settings", Context.MODE_PRIVATE)
        preferences.registerOnSharedPreferenceChangeListener(this)

        // Snapshot collection parameters (kept in sync by onSharedPreferenceChanged).
        requiredFrames = Settings.getCollectionRequiredFrames(this)
        collectionThreshold = Settings.getCollectionThreshold(this)
        collectionInterval = Settings.getCollectionInterval(this)

        // RecyclerView must be ready before setupToolbar() triggers
        // updateCollectionModeUI(), which may refresh the collected-faces list.
        setupRecyclerView()
        setupUI()
        setupToolbar()
        setupDetectionChannel()

        // Must precede startCamera(): the bound analyzer uses cameraExecutor.
        cameraExecutor = Executors.newSingleThreadExecutor()
        faceFeatureExtractor = FaceFeatureExtractor(this)

        if (allPermissionsGranted()) {
            startCamera()
        } else {
            ActivityCompat.requestPermissions(
                this, REQUIRED_PERMISSIONS, REQUEST_CODE_PERMISSIONS
            )
        }

        // Initialize the collected-count label.
        updateCollectionCount()
    }

    /**
     * Applies live settings changes to the collection parameters and overlay flags.
     *
     * Fix: the key is declared nullable — since API 30 the framework delivers a null
     * key when preferences are cleared, and a non-null `String` override would throw
     * on that path. A null key simply matches no branch.
     */
    override fun onSharedPreferenceChanged(sharedPreferences: SharedPreferences, key: String?) {
        when (key) {
            Settings.KEY_COLLECTION_REQUIRED_FRAMES -> {
                requiredFrames = Settings.getCollectionRequiredFrames(this)
            }
            Settings.KEY_COLLECTION_THRESHOLD -> {
                collectionThreshold = Settings.getCollectionThreshold(this)
            }
            Settings.KEY_COLLECTION_INTERVAL -> {
                collectionInterval = Settings.getCollectionInterval(this)
            }
            Settings.KEY_SHOW_FACE_RECT -> {
                binding.faceOverlayView.setShowFaceRect(Settings.isShowFaceRect(this))
            }
            Settings.KEY_SHOW_EYE_RECT -> {
                binding.faceOverlayView.setShowEyeRect(Settings.isShowEyeRect(this))
            }
        }
    }

    /**
     * Releases the camera executor, the feature extractor, the settings listener,
     * and the detection channel.
     *
     * Fix: `Channel.close()` is a plain, non-suspending call, so no coroutine is
     * needed. The previous code launched on [lifecycleScope] — already cancelled at
     * this point — with NonCancellable, making the cleanup ordering needlessly hard
     * to reason about. Closing inline guarantees it happens before onDestroy returns.
     */
    override fun onDestroy() {
        super.onDestroy()
        cameraExecutor.shutdown()
        faceFeatureExtractor.release()
        preferences.unregisterOnSharedPreferenceChangeListener(this)
        try {
            detectionQueue?.close()
            LogUtils.d(TAG, "通道已关闭")
        } catch (e: Exception) {
            LogUtils.e(TAG, "关闭通道时发生错误: ${e.message}")
        }
    }

    /** Installs the toolbar as the action bar, sets the title, and syncs the mode menu item. */
    private fun setupToolbar() {
        setSupportActionBar(binding.toolbar)
        supportActionBar?.setDisplayHomeAsUpEnabled(true)
        title = "人脸采集"
        // NOTE(review): inflating here AND in onCreateOptionsMenu may yield duplicate
        // menu items once the action bar inflates its own copy — verify on device.
        // It is presumably done so updateCollectionModeUI() can find the item now.
        binding.toolbar.inflateMenu(R.menu.menu_face_auto_collect)
        updateCollectionModeUI()
    }

    /** Inflates the collect-screen menu into the action bar. */
    override fun onCreateOptionsMenu(menu: Menu): Boolean {
        menuInflater.inflate(R.menu.menu_face_auto_collect, menu)
        // Returning true makes the menu visible.
        return true
    }

    /** Handles the up button and the collection-mode toggle menu item. */
    override fun onOptionsItemSelected(item: MenuItem): Boolean {
        when (item.itemId) {
            android.R.id.home -> onBackPressed()
            R.id.action_collection_mode -> toggleCollectionMode()
            else -> return super.onOptionsItemSelected(item)
        }
        return true
    }

    /** Flips between automatic and manual collection, then refreshes UI and state. */
    private fun toggleCollectionMode() {
        isAutoCollectMode = isAutoCollectMode.not()
        updateCollectionModeUI()
        resetCollectionState()
    }

    /** Syncs the menu item, layout visibility, and status text with the current mode. */
    private fun updateCollectionModeUI() {
        val auto = isAutoCollectMode

        binding.toolbar.menu.findItem(R.id.action_collection_mode)?.let { item ->
            if (auto) {
                item.title = "切换手动采集"
                item.setIcon(R.drawable.ic_manual_collect)
            } else {
                item.title = "切换自动采集"
                item.setIcon(R.drawable.ic_auto_collect)
            }
        }

        // Toggle the two mode layouts and the thumbnail strip.
        binding.autoCollectLayout.visibility = if (auto) View.VISIBLE else View.GONE
        binding.manualCollectLayout.visibility = if (auto) View.GONE else View.VISIBLE
        binding.rvCollectedFaces.visibility = if (auto) View.VISIBLE else View.GONE

        if (auto) {
            binding.tvStatus.text = "自动采集模式：请保持人脸在框内..."
            // Re-render any faces collected so far (adapter may not exist yet when
            // called from setupToolbar).
            if (::collectedFacesAdapter.isInitialized) {
                updateCollectedFacesDisplay()
            }
        } else {
            binding.tvManualStatus.text = if (isWaitingForConfirmation) {
                "请输入姓名并保存"
            } else {
                "手动采集模式：请保持人脸在框内..."
            }
        }
    }

    /** Clears the pending capture and restores the save form to its initial state. */
    private fun resetCollectionState() {
        isWaitingForConfirmation = false
        faceFeatures = null
        currentFaceBitmap = null
        binding.etName.text?.clear()
        binding.btnSave.isEnabled = false
        // Refresh the collected-count label.
        updateCollectionCount()
    }

    /** Wires the save button, the name-field watcher, and the initial overlay state. */
    private fun setupUI() {
        binding.btnSave.setOnClickListener { saveFace() }

        // Enable saving only when a name is entered AND a face has been captured.
        binding.etName.addTextChangedListener(object : android.text.TextWatcher {
            override fun beforeTextChanged(s: CharSequence?, start: Int, count: Int, after: Int) = Unit
            override fun onTextChanged(s: CharSequence?, start: Int, before: Int, count: Int) = Unit
            override fun afterTextChanged(s: android.text.Editable?) {
                val hasName = !s.isNullOrBlank()
                binding.btnSave.isEnabled = hasName && currentFaceBitmap != null
            }
        })

        // Initial status labels.
        binding.tvStatus.text = "等待检测到合格人脸..."
        binding.tvCollectedCount.text = "已采集：0张"

        // Seed the overlay with configured rect visibility and the preview size.
        with(binding.faceOverlayView) {
            setShowFaceRect(Settings.isShowFaceRect(this@FaceAutoCollectActivity))
            setShowEyeRect(Settings.isShowEyeRect(this@FaceAutoCollectActivity))
            setPreviewSize(binding.viewFinder.width, binding.viewFinder.height)
        }
    }

    /**
     * Binds preview, capture, and analysis use cases to the activity lifecycle.
     *
     * Runs asynchronously once the [ProcessCameraProvider] is ready; safe to call
     * again (it unbinds all prior use cases first).
     */
    private fun startCamera() {
        val cameraProviderFuture = ProcessCameraProvider.getInstance(this)

        cameraProviderFuture.addListener({
            val cameraProvider = cameraProviderFuture.get()

            // Target resolution comes from user settings.
            val targetResolution = Settings.getCameraResolutionSize(this)
            LogUtils.d(TAG, "目标分辨率: ${targetResolution.width}x${targetResolution.height}")

            val preview = Preview.Builder()
                .setTargetResolution(targetResolution)
                .build()
                .also {
                    it.setSurfaceProvider(binding.viewFinder.surfaceProvider)
                }

            imageCapture = ImageCapture.Builder()
                .setCaptureMode(ImageCapture.CAPTURE_MODE_MINIMIZE_LATENCY)
                .build()

            // KEEP_ONLY_LATEST drops stale frames so detection never backs up.
            val imageAnalyzer = ImageAnalysis.Builder()
                .setTargetResolution(targetResolution)
                .setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
                .build()
                .also {
                    it.setAnalyzer(cameraExecutor, FaceAnalyzer())
                }

            try {
                cameraProvider.unbindAll()
                // Select front/back camera from settings; default to front.
                cameraFacing = Settings.getCameraFacing(this)
                isFrontCamera = cameraFacing == CameraSettings.CAMERA_FACING_FRONT
                val cameraSelector = when (cameraFacing) {
                    CameraSettings.CAMERA_FACING_FRONT -> CameraSelector.DEFAULT_FRONT_CAMERA
                    CameraSettings.CAMERA_FACING_BACK -> CameraSelector.DEFAULT_BACK_CAMERA
                    else -> CameraSelector.DEFAULT_FRONT_CAMERA
                }

                cameraProvider.bindToLifecycle(
                    this,
                    cameraSelector,
                    preview,
                    imageCapture,
                    imageAnalyzer
                )

                // Re-seed overlay with the preview's measured size once laid out.
                binding.viewFinder.post {
                    binding.faceOverlayView.apply {
                        setPreviewSize(binding.viewFinder.width, binding.viewFinder.height)
                        setShowFaceRect(Settings.isShowFaceRect(this@FaceAutoCollectActivity))
                        setShowEyeRect(Settings.isShowEyeRect(this@FaceAutoCollectActivity))
                    }
                }
            } catch (e: Exception) {
                LogUtils.e(TAG, "相机启动失败", e)
            }
        }, ContextCompat.getMainExecutor(this))
    }

    /** Validates the manual-save form, then persists the captured face asynchronously. */
    private fun saveFace() {
        val name = binding.etName.text?.toString()?.trim()
        val bitmap = currentFaceBitmap
        val features = faceFeatures

        // All three pieces must be present before we can persist.
        if (name.isNullOrBlank() || bitmap == null || features == null) {
            Toast.makeText(this, "请输入姓名并等待人脸检测和特征提取完成", Toast.LENGTH_SHORT)
                .show()
            return
        }

        lifecycleScope.launch {
            saveFaceToDatabase(name, bitmap, features, System.currentTimeMillis())
        }
    }

    /**
     * Persists a face record to the Room database, then updates the thumbnail strip
     * and resets the form.
     *
     * @param name user-entered display name
     * @param faceBitmap captured face image stored with the record
     * @param features embedding vector for recognition
     * @param timestamp creation time in epoch millis
     */
    private suspend fun saveFaceToDatabase(
        name: String,
        faceBitmap: Bitmap,
        features: FloatArray,
        timestamp: Long
    ) {
        try {
            withContext(Dispatchers.IO) {
                // Build the face record.
                val faceEntity = FaceEntity(
                    id = UUID.randomUUID().toString(),
                    name = name,
                    faceImage = faceBitmap,
                    faceFeatures = features,
                    createTime = timestamp
                )

                // Insert into the database.
                FaceDatabase.getDatabase(this@FaceAutoCollectActivity)
                    .faceDao()
                    .insertFace(faceEntity)
            }

            // Back on the main thread: notify and update the recent-faces strip.
            withContext(Dispatchers.Main) {
                Toast.makeText(this@FaceAutoCollectActivity, "保存成功", Toast.LENGTH_SHORT).show()
                
                // Prepend the processed face to the recent list; display failure here
                // is non-fatal since the record is already persisted.
                try {
                    val processedFace = processFinalFaceImage()
                    recentCollectedFaces.add(0, processedFace to timestamp)
                    // Cap the strip at the 10 most recent faces.
                    if (recentCollectedFaces.size > 10) {
                        recentCollectedFaces.removeAt(recentCollectedFaces.size - 1)
                    }
                    updateCollectedFacesDisplay()
                } catch (e: Exception) {
                    LogUtils.e(TAG, "处理采集人脸失败", e)
                }

                // Clear the form and refresh the count.
                resetCollectionState()
                updateCollectionCount()
            }
        } catch (e: Exception) {
            withContext(Dispatchers.Main) {
                LogUtils.e(TAG, "保存人脸失败", e)
                Toast.makeText(this@FaceAutoCollectActivity, "保存失败: ${e.message}", Toast.LENGTH_SHORT).show()
            }
        }
    }

    /** Pushes the recent faces (newest first) into the adapter and refreshes the count. */
    private fun updateCollectedFacesDisplay() {
        val newestFirst = recentCollectedFaces.sortedByDescending { (_, time) -> time }
        collectedFacesAdapter.submitList(newestFirst)
        updateCollectionCount()
    }

    /** Recomputes the count from the visible recent-faces list and updates the label. */
    private fun updateCollectionCount() {
        val count = recentCollectedFaces.size
        collectionCount = count
        binding.tvCollectedCount.text = "已采集：${count}张"
    }

    /** Starts the camera once permissions are granted, or closes the screen otherwise. */
    override fun onRequestPermissionsResult(
        requestCode: Int,
        permissions: Array<out String>,
        grantResults: IntArray
    ) {
        super.onRequestPermissionsResult(requestCode, permissions, grantResults)
        if (requestCode != REQUEST_CODE_PERMISSIONS) return
        if (allPermissionsGranted()) {
            startCamera()
            return
        }
        Toast.makeText(this, "未授予必要权限", Toast.LENGTH_SHORT).show()
        finish()
    }

    /**
     * CameraX analyzer: runs ML Kit face detection on each frame, updates the overlay,
     * and kicks off feature processing for the first valid face.
     *
     * The ML Kit Task callbacks below run on the main thread, so touching [binding]
     * from them is safe.
     */
    private inner class FaceAnalyzer : ImageAnalysis.Analyzer {
        @androidx.camera.core.ExperimentalGetImage
        override fun analyze(imageProxy: ImageProxy) {
            val mediaImage = imageProxy.image
            if (mediaImage != null) {
                val rotation = imageProxy.imageInfo.rotationDegrees
                val image = InputImage.fromMediaImage(mediaImage, rotation)

                faceDetector.process(image)
                    .addOnSuccessListener { faces ->
                        // Mirror (front camera) then rotate to display orientation.
                        val bitmap = mediaImage.toBitmap()
                        val matrix = Matrix()
                        if (isFrontCamera) {
                            matrix.postScale(-1f, 1f)
                        }
                        matrix.postRotate(rotation.toFloat())

                        val rotatedBitmap = Bitmap.createBitmap(
                            bitmap,
                            0,
                            0,
                            bitmap.width,
                            bitmap.height,
                            matrix,
                            true
                        )

                        // Redraw the face/eye rectangles on the overlay.
                        binding.faceOverlayView.updateFaces(
                            faces,
                            Settings.isShowFaceRect(this@FaceAutoCollectActivity),
                            rotatedBitmap.width,
                            rotatedBitmap.height,
                            0,
                            isFrontCamera,
                            Settings.isShowEyeRect(this@FaceAutoCollectActivity)
                        )

                        // Refresh the on-screen guidance text.
                        updateDetectionHint(faces.firstOrNull(), rotatedBitmap)

                        if (faces.isNotEmpty()) {
                            // Only the first detected face is processed.
                            val face = faces[0]
                            if (isFaceValid(face, rotatedBitmap)) {
                                // Drop the frame if the previous one is still in flight.
                                if (!isProcessingFrame) {
                                    isProcessingFrame = true
                                    lifecycleScope.launch {
                                        try {
                                            processFaceDetectionResult(faces, rotatedBitmap)
                                        } catch (e: Exception) {
                                            LogUtils.e(TAG, "处理人脸失败", e)
                                        } finally {
                                            isProcessingFrame = false
                                        }
                                    }
                                }
                            }
                        }
                    }
                    .addOnFailureListener { e ->
                        LogUtils.e(TAG, "人脸检测失败", e)
                        binding.tvDetectionHint.text = "人脸检测失败，请重试"
                    }
                    .addOnCompleteListener {
                        // Must close the frame or the analyzer stops receiving images.
                        imageProxy.close()
                    }
            } else {
                imageProxy.close()
            }
        }

        /**
         * Converts a YUV_420_888 [Image] to a [Bitmap] by packing NV21 and compressing
         * through JPEG.
         *
         * NOTE(review): this copies the U/V planes wholesale and assumes their layout
         * matches NV21 (no row/pixel stride handling) — this holds on many devices but
         * is not guaranteed by the YUV_420_888 contract; verify on target hardware.
         * The JPEG round-trip at quality 100 is also relatively expensive per frame.
         */
        private fun Image.toBitmap(): Bitmap {
            val yBuffer = planes[0].buffer
            val uBuffer = planes[1].buffer
            val vBuffer = planes[2].buffer

            val ySize = yBuffer.remaining()
            val uSize = uBuffer.remaining()
            val vSize = vBuffer.remaining()

            val nv21 = ByteArray(ySize + uSize + vSize)

            // NV21 expects Y, then interleaved V/U.
            yBuffer.get(nv21, 0, ySize)
            vBuffer.get(nv21, ySize, vSize)
            uBuffer.get(nv21, ySize + vSize, uSize)

            val yuvImage = android.graphics.YuvImage(
                nv21,
                android.graphics.ImageFormat.NV21,
                width,
                height,
                null
            )

            val out = java.io.ByteArrayOutputStream()
            yuvImage.compressToJpeg(android.graphics.Rect(0, 0, width, height), 100, out)
            val imageBytes = out.toByteArray()
            return android.graphics.BitmapFactory.decodeByteArray(imageBytes, 0, imageBytes.size)
        }
    }

    /**
     * Creates the buffered detection channel and starts a single consumer coroutine
     * that processes queued (faces, frame) pairs sequentially.
     */
    private fun setupDetectionChannel() {
        detectionQueue = Channel(Channel.BUFFERED)

        lifecycleScope.launch {
            val channel = detectionQueue ?: return@launch
            try {
                // consumeEach also cancels the channel when the consumer stops.
                channel.consumeEach { (faces, bitmap) ->
                    processDetectionTask(faces, bitmap)
                }
            } catch (e: Exception) {
                LogUtils.e(TAG, "检测通道异常: ${e.message}")
            }
        }
    }

    /** Configures the recently-collected-faces grid: 5 columns with uniform spacing. */
    private fun setupRecyclerView() {
        collectedFacesAdapter = CollectedFacesAdapter()
        val grid = androidx.recyclerview.widget.GridLayoutManager(
            this@FaceAutoCollectActivity,
            5  // five thumbnails per row
        )
        grid.orientation = androidx.recyclerview.widget.GridLayoutManager.VERTICAL
        with(binding.rvCollectedFaces) {
            adapter = collectedFacesAdapter
            layoutManager = grid
            setHasFixedSize(true)
            // Half the gap on each side gives a uniform spacing between cells.
            addItemDecoration(object : RecyclerView.ItemDecoration() {
                override fun getItemOffsets(
                    outRect: android.graphics.Rect,
                    view: View,
                    parent: RecyclerView,
                    state: RecyclerView.State
                ) {
                    val half = 8.dpToPx(parent.context) / 2
                    outRect.set(half, half, half, half)
                }
            })
            clipToPadding = false
            val pad = 8.dpToPx(context)
            setPadding(pad, pad, pad, pad)
        }
    }

    /**
     * Handles one queued detection result: validates quality, extracts an embedding,
     * and advances the auto-collection progress.
     *
     * Fixes over the previous implementation:
     *  - the busy flag was read with `get()` and set later inside a separately
     *    launched coroutine — a check-then-act race; `compareAndSet` now claims the
     *    slot atomically.
     *  - the work was fired off with `lifecycleScope.launch` from inside this suspend
     *    function, which defeated the sequential processing the channel consumer is
     *    meant to provide; the work now runs inline so tasks are handled one at a time.
     */
    private suspend fun processDetectionTask(faces: List<Face>, bitmap: Bitmap) {
        // Skip while cooling down after a completed collection.
        if (isInCooldown) return

        val face = faces.firstOrNull() ?: return

        // Reject faces that are too small/large or badly angled.
        if (!checkFaceQuality(face, bitmap)) return

        // Atomically claim the processing slot; bail if another task is in flight.
        if (!isProcessing.compareAndSet(false, true)) return

        try {
            // Keep the most recent face/frame for the final saved image.
            latestFace = face
            latestBitmap = bitmap.copy(bitmap.config, true)

            // Crop, preprocess, and embed the face off the main thread.
            val features = withContext(Dispatchers.Default) {
                val faceBitmap = faceFeatureExtractor.cropFace(bitmap, face.boundingBox)
                val processedBitmap = ImagePreprocessor.process(
                    faceBitmap,
                    shouldNormalizeLighting = true,
                    shouldEnhanceContrast = true,
                    shouldEqualizeHistogram = true
                )
                faceFeatureExtractor.extractFeatures(processedBitmap)
            }

            collectedFeatures.add(features)

            // Report progress and finish once enough frames are gathered.
            withContext(Dispatchers.Main) {
                binding.tvStatus.text =
                    "采集中(已采集${collectedFeatures.size}/${requiredFrames}张)..."
                binding.tvCollectedCount.text = "已采集：${collectedFeatures.size}张"

                if (collectedFeatures.size >= requiredFrames) {
                    finishCollection()
                }
            }
        } catch (e: Exception) {
            LogUtils.e(TAG, "处理人脸出错", e)
        } finally {
            isProcessing.set(false)
        }
    }

    /**
     * Returns true when the detected face is within the configured size range and
     * facing the camera within the allowed angle; updates the status text otherwise.
     */
    private fun checkFaceQuality(face: Face, bitmap: Bitmap): Boolean {
        // Size limits scale with the frame's short side.
        val shortSide = minOf(bitmap.width, bitmap.height)
        val minSize = shortSide * Settings.getCollectionMinFaceSize(this)
        val maxSize = shortSide * Settings.getMaxFaceSize(this)
        val faceSize = maxOf(face.boundingBox.width(), face.boundingBox.height())

        if (faceSize < minSize || faceSize > maxSize) {
            LogUtils.d(TAG, "人脸大小不合适: $faceSize (限制范围: $minSize-$maxSize)")
            binding.tvStatus.text = "请调整人脸大小..."
            return false
        }

        // Pitch / yaw / roll must all stay within MAX_FACE_ANGLE.
        val pitch = face.headEulerAngleX
        val yaw = face.headEulerAngleY
        val roll = face.headEulerAngleZ

        val withinAngle = abs(pitch) <= MAX_FACE_ANGLE &&
            abs(yaw) <= MAX_FACE_ANGLE &&
            abs(roll) <= MAX_FACE_ANGLE
        if (!withinAngle) {
            LogUtils.d(TAG, "人脸角度过大: X=$pitch, Y=$yaw, Z=$roll")
            binding.tvStatus.text = "请保持人脸正对摄像头..."
            return false
        }

        return true
    }

    /**
     * Finalizes an auto-collection run: averages the collected embeddings, builds
     * the final face image, and persists a [FaceEntity].
     *
     * NOTE(review): the name is read from `etName`, which belongs to the manual
     * layout (hidden in auto mode) — confirm the field is actually fillable when
     * this runs, otherwise this always fails with "请输入姓名".
     */
    private suspend fun finishCollection() {
        try {
            withContext(Dispatchers.Main) {
                binding.tvStatus.text = "正在保存..."
            }

            // Guard: we must have at least one embedding.
            if (collectedFeatures.isEmpty()) {
                throw IllegalStateException("没有收集到人脸特征")
            }

            // Average the per-frame embeddings into one representative vector.
            val averageFeatures = calculateAverageFeatures(collectedFeatures)

            // Build the cropped/normalized image to store with the record.
            val finalFaceBitmap = processFinalFaceImage()

            val name = binding.etName.text?.toString()?.trim()
            if (name.isNullOrBlank()) {
                throw IllegalStateException("请输入姓名")
            }

            // Assemble the record.
            val faceEntity = FaceEntity(
                id = UUID.randomUUID().toString(),
                name = name,
                faceImage = finalFaceBitmap,
                faceFeatures = averageFeatures,  // averaged FloatArray embedding
                createTime = System.currentTimeMillis()
            )

            withContext(Dispatchers.IO) {
                FaceDatabase.getDatabase(this@FaceAutoCollectActivity)
                    .faceDao()
                    .insertFace(faceEntity)
            }

            // Success path: thumbnails, reset, cooldown.
            handleCollectionSuccess()

        } catch (e: Exception) {
            handleCollectionError(e)
        }
    }

    /**
     * Computes the element-wise mean of the given feature vectors.
     *
     * All vectors are assumed to have the same length as the first one.
     *
     * @param features non-empty list of equal-length embeddings
     * @return a new vector whose i-th element is the mean of all i-th elements
     * @throws IllegalArgumentException if [features] is empty (previously this
     *         crashed with an unhelpful IndexOutOfBoundsException)
     */
    private fun calculateAverageFeatures(features: List<FloatArray>): FloatArray {
        require(features.isNotEmpty()) { "features must not be empty" }
        val length = features[0].size
        // Single pass accumulating in doubles: O(n*m) without the per-dimension
        // list allocation the previous map{}.average() incurred.
        val sums = DoubleArray(length)
        for (vector in features) {
            for (i in 0 until length) {
                sums[i] += vector[i]
            }
        }
        val count = features.size
        return FloatArray(length) { i -> (sums[i] / count).toFloat() }
    }

    /**
     * Produces the final face thumbnail from the latest valid frame: orients the
     * frame upright, remaps the detector's bounding box, expands the crop, un-mirrors
     * front-camera output, and scales to a fixed 400x500 (4:5) bitmap.
     *
     * @throws IllegalStateException if no frame/face has been captured yet
     */
    private suspend fun processFinalFaceImage(): Bitmap {
        return withContext(Dispatchers.Default) {
            val currentBitmap = latestBitmap ?: throw IllegalStateException("没有有效的人脸图像")
            val currentFace = latestFace ?: throw IllegalStateException("没有有效的人脸数据")

            // 1. Rotate landscape frames 90° so the image is portrait.
            val matrix = Matrix()
            if (currentBitmap.width > currentBitmap.height) {
                matrix.postRotate(90f)
            }

            val rotatedBitmap = Bitmap.createBitmap(
                currentBitmap,
                0,
                0,
                currentBitmap.width,
                currentBitmap.height,
                matrix,
                true
            )

            // 2. Remap the detector's bounding box into the rotated coordinate space.
            // NOTE(review): this swap assumes the box came from the pre-rotation frame
            // and the 90° case above — verify against the analyzer's actual output.
            val box = currentFace.boundingBox
            val rotatedBox = if (isFrontCamera) {
                // Front camera: coordinates are additionally mirrored horizontally.
                android.graphics.RectF(
                    (rotatedBitmap.width - box.top - box.height()).toFloat(),
                    box.left.toFloat(),
                    (rotatedBitmap.width - box.top).toFloat(),
                    box.right.toFloat()
                )
            } else {
                // Back camera: plain axis swap.
                android.graphics.RectF(
                    box.top.toFloat(),
                    box.left.toFloat(),
                    (box.top + box.height()).toFloat(),
                    box.right.toFloat()
                )
            }

            // 3. Expand the crop around the box center to include the whole face.
            val expandRatio = 1.5f
            val centerX = rotatedBox.centerX()
            val centerY = rotatedBox.centerY()
            val expandedWidth = (rotatedBox.width() * expandRatio).toInt()
            val expandedHeight = (expandedWidth * 1.25f).toInt()  // 4:5 aspect ratio

            // 4. Clamp the crop rectangle to the bitmap bounds.
            val left = (centerX - expandedWidth / 2).toInt().coerceAtLeast(0)
            val top = (centerY - expandedHeight / 2).toInt().coerceAtLeast(0)
            val right = (left + expandedWidth).coerceAtMost(rotatedBitmap.width)
            val bottom = (top + expandedHeight).coerceAtMost(rotatedBitmap.height)

            // 5. Crop the face region.
            val finalBitmap = Bitmap.createBitmap(
                rotatedBitmap,
                left,
                top,
                right - left,
                bottom - top
            )

            // 6. Front camera frames were mirrored for preview; mirror back.
            val mirroredBitmap = if (isFrontCamera) {
                val mirrorMatrix = Matrix()
                mirrorMatrix.setScale(-1f, 1f)
                Bitmap.createBitmap(
                    finalBitmap,
                    0,
                    0,
                    finalBitmap.width,
                    finalBitmap.height,
                    mirrorMatrix,
                    true
                )
            } else {
                finalBitmap
            }

            // 7. Normalize to a fixed output resolution (4:5).
            val targetWidth = 400  // fixed width
            val targetHeight = 500  // fixed height, keeps the 4:5 ratio
            Bitmap.createScaledBitmap(mirroredBitmap, targetWidth, targetHeight, true)
        }
    }

    /**
     * Rotates (and, for the front camera, mirrors) a bitmap to the upright orientation.
     *
     * NOTE(review): the [degrees] parameter is never used — the rotation is always 90°
     * based on [cameraFacing], and the only visible caller passes 0f. Either wire the
     * parameter in or remove it.
     */
    private fun rotateBitmap(bitmap: Bitmap, degrees: Float): Bitmap {
        val matrix = Matrix()

        // Rotation derived from camera facing, not from `degrees`.
        val rotationDegrees = when (cameraFacing) {
            CameraSettings.CAMERA_FACING_FRONT -> {
                // Front camera: mirror horizontally, then rotate.
                matrix.preScale(-1f, 1f)
                90f  // 90° so the image reads upright
            }
            CameraSettings.CAMERA_FACING_BACK -> 90f  // back camera: rotate 90°
            else -> 90f
        }

        matrix.postRotate(rotationDegrees)
        return Bitmap.createBitmap(bitmap, 0, 0, bitmap.width, bitmap.height, matrix, true)
    }

    /**
     * Post-save success path: updates the thumbnail strip, resets the run's state,
     * and enforces a cooldown before the next collection can start.
     *
     * Note: the [delay] below suspends this coroutine inside the Main context for
     * COLLECTION_COOLDOWN ms; the UI thread itself is not blocked.
     */
    private suspend fun handleCollectionSuccess() {
        withContext(Dispatchers.Main) {
            binding.tvStatus.text = "采集完成！请更换其他人脸..."
            Toast.makeText(this@FaceAutoCollectActivity, "人脸采集成功", Toast.LENGTH_SHORT).show()

            // Prepend the processed face; a display failure here is non-fatal since
            // the record is already persisted.
            try {
                val processedFace = processFinalFaceImage()
                val timestamp = System.currentTimeMillis()
                recentCollectedFaces.add(0, processedFace to timestamp)
                // Cap the strip at the 10 most recent faces.
                if (recentCollectedFaces.size > 10) {
                    recentCollectedFaces.removeAt(recentCollectedFaces.size - 1)
                }
                updateCollectedFacesDisplay()
            } catch (e: Exception) {
                LogUtils.e(TAG, "处理采集人脸失败", e)
            }

            // Clear this run's accumulated embeddings.
            collectedFeatures.clear()
            binding.tvCollectedCount.text = "已采集：0张"

            // Enter cooldown so the same person isn't immediately re-collected.
            isInCooldown = true
            lastCollectionTime = System.currentTimeMillis()

            // Leave cooldown after the configured delay (unless the screen is closing).
            delay(COLLECTION_COOLDOWN)
            if (!isFinishing) {
                isInCooldown = false
                binding.tvStatus.text = "等待检测到合格人脸..."
            }
        }
    }

    /** Failure path: surfaces the error, lifts cooldown, and discards partial data. */
    private suspend fun handleCollectionError(e: Exception) {
        withContext(Dispatchers.Main) {
            binding.tvStatus.text = "采集失败，请重试..."
            val message = "保存失败: ${e.message}"
            Toast.makeText(this@FaceAutoCollectActivity, message, Toast.LENGTH_SHORT).show()
            isInCooldown = false
            // Drop the partially collected embeddings.
            collectedFeatures.clear()
            binding.tvCollectedCount.text = "已采集：0张"
        }
    }

    /**
     * Entry point for each analyzed camera frame: validates the first detected
     * face, crops and preprocesses it, extracts its feature embedding, and
     * routes it to automatic or manual collection handling.
     *
     * @param faces faces reported by the ML Kit detector for this frame
     * @param bitmap the full camera frame the detections refer to
     */
    private suspend fun processFaceDetectionResult(faces: List<Face>, bitmap: Bitmap) {
        // In manual mode, pause processing while waiting for user confirmation.
        if (isWaitingForConfirmation && !isAutoCollectMode) {
            return
        }

        // Only the first detected face is considered; reject invalid ones early.
        val face = faces.firstOrNull() ?: return
        if (!isFaceValid(face, bitmap)) return

        // Crop and preprocess the face region off the main thread.
        val croppedFace = withContext(Dispatchers.Default) {
            val faceBitmap = cropFace(bitmap, face.boundingBox)
            // NOTE(review): 0f is passed here; rotateBitmap appears to derive
            // the actual angle from the camera facing internally — confirm
            // whether this argument is used at all.
            val rotatedBitmap = rotateBitmap(faceBitmap, 0f)
            ImagePreprocessor.process(
                rotatedBitmap,
                shouldNormalizeLighting = true,
                shouldEnhanceContrast = true,
                shouldEqualizeHistogram = true
            )
        }

        // Keep copies of the latest accepted face for later saving/processing.
        latestBitmap = croppedFace.copy(croppedFace.config, true)
        latestFace = face

        val features = faceFeatureExtractor.extractFeatures(croppedFace)
        
        if (isAutoCollectMode) {
            handleAutoCollection(features, croppedFace)
        } else {
            handleManualCollection(features, croppedFace)
        }
    }

    /**
     * Drives one step of the automatic collection state machine for a newly
     * detected face.
     *
     * Flow: throttle faces matching the most recent capture within the
     * configured interval; otherwise accumulate feature frames until
     * [requiredFrames] is reached, then average them, deduplicate against the
     * database, and save under an auto-generated timestamped name.
     *
     * @param features embedding vector extracted from the current frame
     * @param faceBitmap cropped, preprocessed face image for this frame
     */
    private suspend fun handleAutoCollection(features: FloatArray, faceBitmap: Bitmap) {
        // Check whether this face matches the most recently collected one.
        val currentTime = System.currentTimeMillis()
        val isSimilarToRecent = checkSimilarToRecentFace(features)
        
        if (isSimilarToRecent) {
            // Same face seen too recently — show remaining wait time and bail out.
            if (currentTime - lastCollectionTime < collectionInterval * 1000) {
                withContext(Dispatchers.Main) {
                    val remainingSeconds = ((collectionInterval * 1000) - (currentTime - lastCollectionTime)) / 1000
                    binding.tvStatus.text = "该人脸刚刚采集过，请等待${remainingSeconds}秒后再次采集..."
                }
                return
            }
        }

        if (!isCollecting) {
            // Start a fresh collection round.
            // NOTE(review): subsequent frames are never re-matched against the
            // face that started the round, so a different person entering
            // mid-round could mix features — confirm upstream filtering.
            isCollecting = true
            collectedFeatures.clear()
            // Keep a copy of the current face image for the final save.
            latestBitmap = faceBitmap.copy(faceBitmap.config, true)
        }

        // Round already has enough frames; nothing more to accumulate.
        if (collectedFeatures.size >= requiredFrames) {
            return
        }

        // Accumulate this frame's features.
        collectedFeatures.add(features)
        
        withContext(Dispatchers.Main) {
            binding.tvStatus.text = "采集中 (${collectedFeatures.size}/$requiredFrames)"
        }

        // Enough frames collected — finalize this round.
        if (collectedFeatures.size >= requiredFrames) {
            // Average the accumulated feature vectors.
            val averageFeatures = calculateAverageFeatures(collectedFeatures)
            
            // Deduplicate against the database using the averaged features.
            val isFaceExists = checkFaceExistsWithThreshold(averageFeatures, collectionThreshold)

            if (!isFaceExists) {
                // Auto-generate a timestamped name and persist the face.
                val timestamp = System.currentTimeMillis()
                val dateFormat = SimpleDateFormat("yyyyMMdd_HHmmss", Locale.getDefault())
                val name = "Face_${dateFormat.format(Date(timestamp))}"

                // Produce the final processed face image for storage.
                val finalFaceBitmap = processFinalFaceImage()

                // Persist and refresh the on-screen counter.
                saveFaceToDatabase(name, finalFaceBitmap, averageFeatures, timestamp)
                updateCollectionCount()
                
                withContext(Dispatchers.Main) {
                    binding.tvStatus.text = "采集成功！"
                }
            } else {
                withContext(Dispatchers.Main) {
                    binding.tvStatus.text = "该人脸已存在，请更换其他人脸"
                }
            }

            // Reset state for the next round.
            isCollecting = false
            collectedFeatures.clear()
            
            // Record round completion time (used by the throttle above).
            lastCollectionTime = currentTime
            
            // Short cooldown before processing further frames.
            delay(2000)
        }
    }

    /**
     * Returns true when [features] closely matches the most recently stored
     * face, i.e. cosine similarity is at or above the collection threshold.
     * Returns false when the database holds no previously collected face.
     */
    private suspend fun checkSimilarToRecentFace(features: FloatArray): Boolean {
        // Fetch the most recently stored face off the main thread.
        val lastFace = withContext(Dispatchers.IO) {
            FaceDatabase.getDatabase(this@FaceAutoCollectActivity).faceDao().getLastCollectedFace()
        }
        return lastFace != null &&
            calculateCosineSimilarity(features, lastFace.faceFeatures) >= collectionThreshold
    }

    /**
     * Prepares a manually-confirmed save: stashes the candidate face and its
     * features, enables the save button when a name is present, and warns if a
     * similar face already exists in the database.
     *
     * @param features embedding vector for the candidate face
     * @param faceBitmap cropped, preprocessed candidate face image
     */
    private suspend fun handleManualCollection(features: FloatArray, faceBitmap: Bitmap) {
        withContext(Dispatchers.Main) {
            // Keep the candidate around until the user confirms the save.
            currentFaceBitmap = faceBitmap
            faceFeatures = features

            // Saving requires a non-blank name in the input field.
            binding.btnSave.isEnabled = !binding.etName.text.isNullOrBlank()

            // Pause frame processing until the user acts.
            isWaitingForConfirmation = true
            binding.tvManualStatus.text = "请输入姓名并保存"

            // Warn (but do not block) when a similar face is already stored.
            if (checkFaceExistsWithThreshold(features, collectionThreshold)) {
                Toast.makeText(this@FaceAutoCollectActivity, "警告：检测到相似人脸", Toast.LENGTH_SHORT).show()
            }
        }
    }

    /**
     * Returns true when any stored face's cosine similarity with [features]
     * reaches [threshold]. Similarity for each candidate is logged.
     */
    private suspend fun checkFaceExistsWithThreshold(features: FloatArray, threshold: Float): Boolean {
        // Load all stored embeddings off the main thread.
        val storedFaces = withContext(Dispatchers.IO) {
            FaceDatabase.getDatabase(this@FaceAutoCollectActivity).faceDao().getAllFacesForComparison()
        }

        LogUtils.d(TAG, "开始查重检查，采集阈值: $threshold")

        for (face in storedFaces) {
            val similarity = calculateCosineSimilarity(features, face.faceFeatures)
            LogUtils.d(TAG, "与 ${face.name} 的相似度: $similarity")
            if (similarity >= threshold) {
                return true
            }
        }
        return false
    }

    /**
     * Computes the cosine similarity of two feature vectors.
     *
     * Iterates over [features1]'s indices, so both arrays are expected to have
     * the same length (a shorter [features2] would throw IndexOutOfBounds).
     *
     * @return similarity in [-1, 1]; 0f when either vector has zero magnitude
     */
    private fun calculateCosineSimilarity(features1: FloatArray, features2: FloatArray): Float {
        var dotProduct = 0f
        var norm1 = 0f
        var norm2 = 0f

        for (i in features1.indices) {
            dotProduct += features1[i] * features2[i]
            norm1 += features1[i] * features1[i]
            norm2 += features2[i] * features2[i]
        }

        // Guard against zero-magnitude vectors: the previous expression divided
        // by zero here and returned NaN, which made every `similarity >=
        // threshold` comparison silently false. Returning 0f keeps those
        // comparisons false while avoiding NaN propagation into logs/UI.
        val denominator = sqrt(norm1) * sqrt(norm2)
        return if (denominator > 0f) dotProduct / denominator else 0f
    }

    /**
     * Updates the on-screen hint explaining why the current detection is (or
     * is not) acceptable. Branch order roughly mirrors the checks in
     * [isFaceValid] so the user sees the first failing condition.
     *
     * NOTE(review): the size branches here compare each dimension against its
     * own extent, whereas isFaceValid uses the frame's shorter side for both —
     * the displayed reason may not always match the actual failing check.
     *
     * @param face the detected face, or null when none was found
     * @param bitmap the frame the detection refers to
     */
    private fun updateDetectionHint(face: Face?, bitmap: Bitmap) {
        val hintText = when {
            face == null -> "未检测到人脸，请将人脸保持在框内"
            !isFaceValid(face, bitmap) -> {
                // Face found but rejected — diagnose which constraint failed.
                when {
                    // Bounding box partially outside the frame.
                    face.boundingBox.left < 0 || face.boundingBox.top < 0 ||
                            face.boundingBox.right > bitmap.width || face.boundingBox.bottom > bitmap.height ->
                        "人脸超出图像范围，请调整位置"

                    // Face smaller than the configured minimum fraction.
                    face.boundingBox.width() < bitmap.width * Settings.getMinFaceSize(this) ||
                            face.boundingBox.height() < bitmap.height * Settings.getMinFaceSize(this) ->
                        "人脸太小，请靠近摄像头"

                    // Face larger than the configured maximum fraction.
                    face.boundingBox.width() > bitmap.width * Settings.getMaxFaceSize(this) ||
                            face.boundingBox.height() > bitmap.height * Settings.getMaxFaceSize(this) ->
                        "人脸太大，请远离摄像头"

                    // Head rotated too far on any Euler axis.
                    abs(face.headEulerAngleX) > MAX_FACE_ANGLE ||
                            abs(face.headEulerAngleY) > MAX_FACE_ANGLE ||
                            abs(face.headEulerAngleZ) > MAX_FACE_ANGLE ->
                        "人脸角度过大，请保持正对摄像头"

                    // No tracking id: detection is not stable across frames.
                    face.trackingId == null ->
                        "人脸不稳定，请保持静止"

                    // Landmark check failed — distinguish likely mask/glasses
                    // occlusion using the same heuristics as checkFaceLandmarks.
                    !checkFaceLandmarks(face) -> {
                        val nose = face.getLandmark(FaceLandmark.NOSE_BASE)
                        val mouthBottom = face.getLandmark(FaceLandmark.MOUTH_BOTTOM)
                        val leftEye = face.getLandmark(FaceLandmark.LEFT_EYE)
                        val rightEye = face.getLandmark(FaceLandmark.RIGHT_EYE)

                        if (nose != null && mouthBottom != null) {
                            // Short nose-to-mouth distance suggests a mask.
                            val noseToMouthDistance = abs(mouthBottom.position.y - nose.position.y)
                            val expectedDistance = face.boundingBox.height() * 0.25f
                            if (noseToMouthDistance < expectedDistance * 0.6f) {
                                "请摘下口罩，确保人脸完整"
                            } else if (leftEye != null && rightEye != null &&
                                abs(leftEye.position.y - rightEye.position.y) > face.boundingBox.height() * 0.1f
                            ) {
                                // Uneven eye heights suggest glasses/occlusion.
                                "请调整眼镜位置，确保眼睛清晰可见"
                            } else {
                                "请确保人脸完整，不要遮挡"
                            }
                        } else {
                            "请确保人脸完整，不要遮挡"
                        }
                    }

                    else -> "请保持人脸在框内"
                }
            }

            else -> "人脸检测正常"
        }

        binding.tvDetectionHint.text = hintText
    }

    /**
     * Validates a detected face against frame-position, size, head-pose,
     * landmark and tracking-stability constraints before it may be collected.
     *
     * @return true when every constraint passes
     */
    private fun isFaceValid(face: Face, bitmap: Bitmap): Boolean {
        val box = face.boundingBox

        // The whole bounding box must lie inside the frame.
        val insideFrame = box.left >= 0 && box.top >= 0 &&
                box.right <= bitmap.width && box.bottom <= bitmap.height
        if (!insideFrame) return false

        // The face size (largest box edge) must fall inside the configured
        // window, measured relative to the frame's shorter side.
        val shorterSide = Math.min(bitmap.width, bitmap.height)
        val faceSize = Math.max(box.width(), box.height())
        if (faceSize < shorterSide * Settings.getMinFaceSize(this)) return false
        if (faceSize > shorterSide * Settings.getMaxFaceSize(this)) return false

        // Head pose must stay within ±MAX_FACE_ANGLE on every Euler axis
        // (pitch, yaw, roll).
        val withinPose = abs(face.headEulerAngleX) <= MAX_FACE_ANGLE &&
                abs(face.headEulerAngleY) <= MAX_FACE_ANGLE &&
                abs(face.headEulerAngleZ) <= MAX_FACE_ANGLE
        if (!withinPose) return false

        // Key landmarks (eyes, nose, mouth) must be present and plausible.
        if (!checkFaceLandmarks(face)) return false

        // A missing tracking id indicates an unstable detection.
        return face.trackingId != null
    }

    /**
     * Verifies that the nose, mouth-bottom and both eye landmarks are present,
     * then applies two occlusion heuristics: a short nose-to-mouth distance
     * (possible mask) and uneven eye heights (possible glasses/occlusion).
     *
     * @return true when all landmarks are present and both heuristics pass
     */
    private fun checkFaceLandmarks(face: Face): Boolean {
        // Nose landmark is mandatory.
        val nose = face.getLandmark(FaceLandmark.NOSE_BASE) ?: run {
            LogUtils.d(TAG, "未检测到鼻子关键点")
            return false
        }

        // Only the mouth-bottom landmark is required for the mouth.
        val mouthBottom = face.getLandmark(FaceLandmark.MOUTH_BOTTOM) ?: run {
            LogUtils.d(TAG, "未检测到嘴巴关键点")
            return false
        }

        // Both eyes must be detected.
        val leftEye = face.getLandmark(FaceLandmark.LEFT_EYE)
        val rightEye = face.getLandmark(FaceLandmark.RIGHT_EYE)
        if (leftEye == null || rightEye == null) {
            LogUtils.d(TAG, "未检测到眼睛关键点")
            return false
        }

        val faceHeight = face.boundingBox.height()

        // Mask heuristic: the nose-to-mouth distance normally spans roughly
        // 25% of the face height; under 60% of that suggests a covered mouth.
        val noseToMouthDistance = abs(mouthBottom.position.y - nose.position.y)
        val expectedDistance = faceHeight * 0.25f
        if (noseToMouthDistance < expectedDistance * 0.6f) {
            LogUtils.d(
                TAG,
                "检测到可能佩戴口罩，鼻子到嘴巴距离异常: $noseToMouthDistance < ${expectedDistance * 0.6f}"
            )
            return false
        }

        // Occlusion heuristic: both eyes should sit at roughly the same height
        // (within 10% of the face height).
        if (abs(leftEye.position.y - rightEye.position.y) > faceHeight * 0.1f) {
            LogUtils.d(TAG, "眼睛位置异常，可能被眼镜遮挡")
            return false
        }

        return true
    }

    /**
     * Crops a portrait-style (4:5 width:height) region around a detected face.
     *
     * The crop is centered on the face box, sized from the box's larger edge
     * expanded 1.8x to include some background, then shifted (not shrunk) when
     * it would run past the frame edges, and finally clamped to valid
     * coordinates.
     *
     * @param bitmap full camera frame
     * @param boundingBox face rectangle reported by the detector
     * @return the cropped face bitmap
     */
    private fun cropFace(bitmap: Bitmap, boundingBox: android.graphics.Rect): Bitmap {
        // Center the crop on the face box so the face stays centered.
        val centerX = boundingBox.centerX()
        val centerY = boundingBox.centerY()

        // Base the crop size on the larger box edge so the face always fits,
        // regardless of the detected box's aspect ratio.
        val maxDimension = Math.max(boundingBox.width(), boundingBox.height())

        // Expand by 1.8x to include background, then force a 4:5 ID-photo
        // aspect ratio (width = 0.8 * height).
        val expandRatio = 1.8f
        val expandedHeight = (maxDimension * expandRatio).toInt()
        val expandedWidth = (expandedHeight * 0.8f).toInt()

        // Position the crop window around the face center.
        var cropLeft = centerX - expandedWidth / 2
        var cropTop = centerY - expandedHeight / 2
        var cropRight = cropLeft + expandedWidth
        var cropBottom = cropTop + expandedHeight

        // If the window spills past an edge, slide the whole window back
        // inside instead of shrinking it, keeping the face as centered as
        // possible.
        if (cropLeft < 0) {
            cropRight -= cropLeft  // shift the window right
            cropLeft = 0
        }
        if (cropTop < 0) {
            cropBottom -= cropTop  // shift the window down
            cropTop = 0
        }
        if (cropRight > bitmap.width) {
            cropLeft -= (cropRight - bitmap.width)  // shift the window left
            cropRight = bitmap.width
        }
        if (cropBottom > bitmap.height) {
            cropTop -= (cropBottom - bitmap.height)  // shift the window up
            cropBottom = bitmap.height
        }

        // Final clamp: all coordinates inside the frame, at least 1px in size
        // (guards against a face box larger than the frame itself).
        cropLeft = cropLeft.coerceIn(0, bitmap.width - 1)
        cropTop = cropTop.coerceIn(0, bitmap.height - 1)
        cropRight = cropRight.coerceIn(1, bitmap.width)
        cropBottom = cropBottom.coerceIn(1, bitmap.height)

        val finalWidth = (cropRight - cropLeft).coerceAtLeast(1)
        val finalHeight = (cropBottom - cropTop).coerceAtLeast(1)

        // Return the cropped region directly. The previous implementation
        // round-tripped the result through a JPEG compress(100)/decode cycle
        // "for quality", but JPEG is lossy even at quality 100 — that step
        // only added compression artifacts and an extra allocation; the
        // in-memory crop is already at full quality.
        return Bitmap.createBitmap(bitmap, cropLeft, cropTop, finalWidth, finalHeight)
    }

    // True when no runtime permission in REQUIRED_PERMISSIONS is still ungranted.
    private fun allPermissionsGranted(): Boolean = REQUIRED_PERMISSIONS.none { permission ->
        ContextCompat.checkSelfPermission(baseContext, permission) != PackageManager.PERMISSION_GRANTED
    }

    /** Converts a dp value to pixels, rounding half-up to the nearest integer. */
    private fun Int.dpToPx(context: Context): Int =
        (this * context.resources.displayMetrics.density + 0.5f).toInt()

    /**
     * Adapter for the horizontal strip of recently collected faces. Items are
     * (bitmap, collection-timestamp) pairs; the timestamp serves as the stable
     * item identity for DiffUtil.
     */
    private class CollectedFacesAdapter :
        ListAdapter<Pair<Bitmap, Long>, CollectedFacesAdapter.ViewHolder>(DIFF_CALLBACK) {

        class ViewHolder(private val imageView: android.widget.ImageView) :
            RecyclerView.ViewHolder(imageView) {

            // Bitmaps are stored already orientation-corrected, so they are
            // shown as-is.
            fun bind(item: Pair<Bitmap, Long>) {
                imageView.setImageBitmap(item.first)
            }
        }

        override fun onCreateViewHolder(parent: ViewGroup, viewType: Int): ViewHolder {
            // Size each cell so roughly five fit across (5.2 leaves spacing),
            // with a 4:5 width:height ratio matching the saved crops.
            val cellWidth = (parent.width / 5.2).toInt()
            val cellHeight = (cellWidth * 1.25f).toInt()

            val faceView = android.widget.ImageView(parent.context)
            faceView.layoutParams = ViewGroup.LayoutParams(cellWidth, cellHeight)
            faceView.scaleType = android.widget.ImageView.ScaleType.CENTER_CROP
            faceView.setPadding(2, 2, 2, 2)
            faceView.background = ContextCompat.getDrawable(faceView.context, R.drawable.bg_face_item)
            return ViewHolder(faceView)
        }

        override fun onBindViewHolder(holder: ViewHolder, position: Int) {
            holder.bind(getItem(position))
        }

        companion object {
            private val DIFF_CALLBACK = object : DiffUtil.ItemCallback<Pair<Bitmap, Long>>() {
                // Identity is the capture timestamp.
                override fun areItemsTheSame(
                    oldItem: Pair<Bitmap, Long>,
                    newItem: Pair<Bitmap, Long>
                ): Boolean = oldItem.second == newItem.second

                // Contents compare the timestamp plus a pixel-level bitmap
                // comparison (Bitmap.sameAs).
                override fun areContentsTheSame(
                    oldItem: Pair<Bitmap, Long>,
                    newItem: Pair<Bitmap, Long>
                ): Boolean = oldItem.second == newItem.second && oldItem.first.sameAs(newItem.first)
            }
        }
    }

    companion object {
        // Log tag for this activity.
        private const val TAG = "FaceAutoCollectActivity"
        // Request code for the runtime-permission prompt.
        private const val REQUEST_CODE_PERMISSIONS = 10
        // Runtime permissions required for collection.
        private val REQUIRED_PERMISSIONS = arrayOf(Manifest.permission.CAMERA)
        // Presumably the frame-sampling interval in ms — not referenced in
        // this chunk; confirm usage elsewhere in the file.
        private const val SAMPLING_INTERVAL = 100L
        // NOTE(review): pixel-based size bounds, not referenced in this chunk —
        // sizing here uses Settings.getMinFaceSize/getMaxFaceSize instead;
        // possibly dead constants.
        private const val MIN_FACE_SIZE = 100
        private const val MAX_FACE_SIZE = 300
        // Maximum allowed head rotation (degrees) on each Euler axis.
        private const val MAX_FACE_ANGLE = 20f
        // NOTE(review): not referenced in this chunk — confirm before removing.
        private const val FEATURES_PER_FACE = 3
        // Cooldown after a successful collection, in milliseconds.
        private const val COLLECTION_COOLDOWN = 5000L
    }
} 