<!DOCTYPE html>
<html lang="zh-CN">

<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <title>人脸活体检测与识别</title>
    <script src="../js/load.js"></script>
    <script src="face-api.min.js"></script>
    <!-- <script src="vconsole.min.js"></script> -->
</head>

<body>
    <div style="display: flex;align-items: center;justify-content: center;width: 100%;height: 100%;flex-direction: column;">
        <img id="showImg" src="check.jpg" alt="图片 预览..." style="display: none;">
        <video id="video" muted playsinline></video>
    <button class="layui-btn layui-btn-lg layui-btn-disabled" id="AnalysisFaceOnline" type="button" onclick="AnalysisFaceOnline()" style="margin-top: 20px;height: 200px;width: 400px;font-size: 60px;" disabled>打开摄像头</button>
        <button class="layui-btn layui-btn-lg layui-btn-disabled" id="StartAnalysis" type="button" onclick="StartAnalysis()" style="margin-top: 20px;height: 200px;width: 400px;font-size: 60px" disabled>开始检测</button>
        <button class="layui-btn layui-btn-lg layui-btn-disabled" id="ResetAnalysis" type="button" onclick="ResetAnalysis()" style="margin-top: 20px;height: 200px;width: 400px;font-size: 60px" disabled>重新检测</button>
        <label id="tip" style="font-size: 60px;font-weight: bold;color:red;margin-top: 20px;">提示信息</label>
        <canvas id="canvas" style="display:none;"></canvas>
    </div>
</body>
<script>
    // Shared DOM handles used by the capture loop in vedioCatchInit() below.
    var video = document.getElementById('video');
    var canvas = document.getElementById('canvas'); // hidden canvas frames are drawn into
    var context = canvas.getContext('2d');

    // On DOM ready: show a loading overlay, load the face-detection and
    // face-landmark models, then warm them up via startAnalysis(). The
    // video 'play' listener is wired immediately so it is in place before
    // the user opens the camera.
    $(function() {
        StartLoading("加载中");
        Promise.all([
            faceapi.loadFaceDetectionModel('models'),
            faceapi.loadFaceLandmarkModel('models')
        ]).then(startAnalysis).catch(function(err) {
            // FIX: without this catch, a failed model download left the
            // loading overlay up forever with no feedback to the user.
            console.error('模型加载失败：', err);
            CloseLoading();
            $("#tip").text("人脸检测模型加载失败，请刷新页面重试");
        });
        vedioCatchInit();
    })

    // Warm-up pass: runs one detection + one landmark pass over the bundled
    // check.jpg so the first real camera frame is not slowed down by lazy
    // model initialisation. Enables the "open camera" button on success.
    function startAnalysis() {
        console.log('模型加载成功！');
        var canvas1 = faceapi.createCanvasFromMedia(document.getElementById('showImg'))
        faceapi.detectSingleFace(canvas1).then((detection) => {
            if (detection) {
                return faceapi.detectFaceLandmarks(canvas1).then((landmarks) => {
                    console.log('模型预热调用成功！');
                    CloseLoading();
                    $("#AnalysisFaceOnline").removeClass("layui-btn-disabled").attr("disabled", false)
                    layui.form.render();
                    alert("人脸检测模型加载成功")
                })
            }
            // FIX: previously a warm-up image with no detectable face left the
            // loading overlay up forever and every button permanently disabled.
            CloseLoading();
            $("#tip").text("模型预热失败：未在示例图片中检测到人脸");
        }).catch((err) => {
            // FIX: detection/landmark rejections were silently dropped before.
            console.error('模型预热失败：', err);
            CloseLoading();
            $("#tip").text("模型预热失败，请刷新页面重试");
        })

    }

    // Dialog close callback: delegates to the shared CloseDialog() helper
    // (defined elsewhere, presumably in ../js/load.js — TODO confirm).
    function HandleClose() {
        CloseDialog();
    }
</script>

<script>
    // onclick handler for the "打开摄像头" button: requests the front ("user")
    // camera stream, attaches it to the <video> element, and enables the
    // "开始检测" button once the stream is live.
    function AnalysisFaceOnline() {
        var videoElement = document.getElementById('video');
        // FIX: navigator.mediaDevices is undefined on insecure (non-HTTPS,
        // non-localhost) origins, so the old check threw a TypeError before
        // it could report anything. Guard the object itself first.
        if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
            navigator.mediaDevices.getUserMedia({ video: { facingMode: "user" } }) // 请求视频流
                .then(function(stream) {
                    videoElement.srcObject = stream; // 将视频流设置到<video>元素
                    videoElement.play();
                    $("#StartAnalysis").removeClass("layui-btn-disabled").attr("disabled", false)
                    layui.form.render();
                })
                .catch(function(err) {
                    // Permission denied, no camera, or device busy.
                    console.error("获取摄像头错误：", err);
                    $("#tip").text("无法打开摄像头，请检查摄像头权限");
                });
        } else {
            console.error("您的浏览器不支持getUserMedia API");
            $("#tip").text("您的浏览器不支持getUserMedia API");
        }
    }

    var maxFrames = 10; // sliding-window size (frames) for each gesture check
    var openMouthFrames = []; // recent open-mouth detections (booleans)
    var shakeHeadFrames = []; // recent head-shake detections (booleans)
    var thresholdFrames = 6; // positives needed within the window to pass a check
    var lastFace; // NOTE(review): never read or written anywhere in this file


    var openmouse = true; // true = open-mouth check inactive or already passed
    var shakehead = true; // true = head-shake check inactive or already passed

    // Frame-capture loop: once the <video> starts playing, repeatedly draws
    // the current frame into the hidden canvas, runs face-api.js detection
    // and landmarks on it, and drives the two liveness checks (open mouth,
    // then shake head). When both pass, the final frame is POSTed to the
    // server for face recognition.
    // NOTE(review): function name keeps the original "vedio" typo because it
    // is referenced by name from the document-ready handler above.
    function vedioCatchInit() {
        video.addEventListener('play', function() {
            function captureFrame() {
                if (!video.paused && !video.ended) {
                    // Fixed 200x300 capture size; the video frame is scaled to fit.
                    canvas.width = 200;
                    canvas.height = 300;
                    // Draw the current video frame onto the hidden canvas.
                    context.drawImage(video, 0, 0, canvas.width, canvas.height);
                    //outputImage.src = canvas.toDataURL('image/png');
                    faceapi.detectSingleFace(canvas).then((detection) => {
                        if (detection) {
                            faceapi.detectFaceLandmarks(canvas).then((landmarks) => {
                                if (!openmouse) {
                                    // Mouth-open heuristic: ratio of inner-lip gap to
                                    // mouth width, computed from 68-point landmarks.
                                    var mouth = landmarks.getMouth();
                                    var mouthHeight = Math.abs(mouth[14].y - mouth[18].y);
                                    var mouthWidth = Math.abs(mouth[12].x - mouth[16].x);
                                    var mouthAspectRatio = mouthHeight / mouthWidth;

                                    // 0.35 threshold — presumably tuned empirically; TODO confirm.
                                    var mouthOpen = mouthAspectRatio > 0.35;

                                    // Record this frame's verdict in the sliding window.
                                    openMouthFrames.push(mouthOpen);
                                    if (openMouthFrames.length > maxFrames) {
                                        openMouthFrames.shift();
                                    }

                                    // Pass when enough frames in the window saw an open mouth;
                                    // then arm the head-shake check.
                                    var openMouthCount = openMouthFrames.filter(frame => frame).length;
                                    if (openMouthCount >= thresholdFrames) {
                                        openmouse = true;
                                        $("#tip").text("请摇摇头")
                                        shakehead = false;
                                        shakeHeadFrames = [];
                                    }
                                }

                                if (!shakehead) {
                                    // Head-yaw heuristic: horizontal offset of the nose tip
                                    // from the eye midpoint, normalised by eye distance.
                                    var leftEye = landmarks.getLeftEye();
                                    var rightEye = landmarks.getRightEye();
                                    var nose = landmarks.getNose();
                                    var jaw = landmarks.getJawOutline();
                                    var eyeCenter = {
                                        x: (leftEye[0].x + rightEye[3].x) / 2,
                                        y: (leftEye[0].y + rightEye[3].y) / 2
                                    };
                                    var noseTip = nose[3]; // nose tip is typically the 4th nose landmark

                                    var dx = noseTip.x - eyeCenter.x;
                                    var dy = noseTip.y - eyeCenter.y;

                                    // Approximate yaw angle in degrees.
                                    var yaw = Math.atan2(dx, distance(leftEye[0], rightEye[3])) * (180 / Math.PI);

                                    var shakeHead = Math.abs(yaw) > 10;
                                    // Record this frame's verdict in the sliding window.
                                    shakeHeadFrames.push(shakeHead);
                                    if (shakeHeadFrames.length > maxFrames) {
                                        shakeHeadFrames.shift();
                                    }

                                    // Pass when enough frames in the window saw a turned head
                                    // (original comment wrongly said "open mouth" here).
                                    var shakeHeadCount = shakeHeadFrames.filter(frame => frame).length;
                                    if (shakeHeadCount >= thresholdFrames) {
                                        shakehead = true;
                                        $("#tip").text("检测完毕，正在识别人脸")
                                        // Liveness passed: send the current frame to the server
                                        // for face recognition.
                                        var img = canvas.toDataURL('image/png');
                                        var param = {
                                            base64Image: img
                                        }
                                        HttpPost(JSON.stringify(param), 'face/regist/query', function(result) {
                                            $("#ResetAnalysis").removeClass("layui-btn-disabled").attr("disabled", false)
                                            layui.form.render();
                                            if (result.code == 0) {
                                                $("#tip").text("【人脸匹配结果】：" + result.data.userName)
                                            } else {
                                                $("#tip").text(result.msg)
                                            }
                                        })
                                    }
                                }
                            })
                        } else {
                            console.log("no face")
                        }
                    })
                    // Re-schedule to keep capturing (every 100 ms, not 500 —
                    // the original comment was out of date).
                    setTimeout(captureFrame, 100);
                }
            }
            captureFrame(); // start the capture loop
        });
    }

    // Euclidean distance between two {x, y} landmark points.
    // Math.hypot is clearer than the manual sqrt(dx*dx + dy*dy) and avoids
    // overflow/underflow of the intermediate squares for extreme coordinates.
    function distance(point1, point2) {
        return Math.hypot(point1.x - point2.x, point1.y - point2.y);
    }

    // onclick handler for "开始检测": arms the open-mouth check with a fresh
    // sliding window and prompts the user for the first liveness gesture.
    function StartAnalysis() {
        openMouthFrames = [];
        openmouse = false;
        $("#tip").text("请张张嘴")
    }

    // onclick handler for "重新检测" (re-run the check). Restarting the
    // liveness flow is exactly the same as starting it, so delegate to
    // StartAnalysis() instead of maintaining a duplicate body.
    function ResetAnalysis() {
        StartAnalysis();
    }
</script>

</html>