<!DOCTYPE html>
<html lang="zh-CN">
<head>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <title>BodyPix Demo 02 - 摄像头实时 用 body-pix 抠出人的轮廓</title>
    <!-- <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs"></script> -->
    <script src="/static/js/tf@3.29.1.min.js"></script>    
    <!-- <script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/body-pix"></script> -->
    <script src="/static/js/body-pix@2.2.0.min.js"></script>
</head>
<body>
    <video id="video" width="640" height="480" style="display: none;" autoplay muted playsinline></video>
    <canvas id="outputCanvas"
        style="position: absolute;
        z-index: 99;
        top: 100px;
        left: 0;">
    </canvas>
    
    
    <button id="startButton">Start Body Segmentation</button>
    <script>
        // DOM handles: hidden camera feed and the visible output canvas.
        let video = document.getElementById('video');
        let canvas = document.getElementById('outputCanvas');
        let ctx = canvas.getContext('2d');
        // BodyPix model instance, assigned by initializeBodyPix().
        // Declared explicitly: the original assigned `net` without a declaration,
        // creating an implicit global (a ReferenceError in strict mode / modules).
        let net = null;

        // Request the user's camera and attach the live stream to the hidden <video>.
        // Rejects (propagates to the caller) if the user denies camera access.
        async function startVideo() {
            video.srcObject = await navigator.mediaDevices.getUserMedia({ video: true });
        }

        // Load the BodyPix model (MobileNetV1 backbone, quantized weights)
        // into the shared `net` variable. Logs and swallows load failures.
        async function initializeBodyPix() {
            const modelConfig = {
                architecture: 'MobileNetV1',
                outputStride: 16,   // lower = more accurate, slower
                multiplier: 0.75,   // MobileNet width multiplier
                quantBytes: 2       // weight quantization (smaller download)
            };
            try {
                net = await bodyPix.load(modelConfig);
                console.log('BodyPix model loaded successfully.');
            } catch (error) {
                console.error('Failed to load BodyPix model:', error);
            }
        }

        // One frame of the live loop: segment the current video frame with
        // BodyPix and repaint the canvas showing only person pixels, with a
        // fully transparent background. Reschedules itself via
        // requestAnimationFrame for a real-time effect.
        async function segmentBody() {
            if (!net) {
                console.warn('BodyPix model is not loaded yet.');
                return;
            }

            // Per-pixel mask: truthy = person, falsy = background.
            const { data: mask } = await net.segmentPerson(video, { internalResolution: 'medium' });

            // Draw the raw frame so its pixels can be read back.
            ctx.drawImage(video, 0, 0, video.videoWidth, video.videoHeight);
            const { data: framePixels } = ctx.getImageData(0, 0, canvas.width, canvas.height);

            // createImageData() returns a zero-initialized (fully transparent)
            // buffer, so only person pixels need copying — the original's
            // explicit "write transparent" else-branch was redundant work.
            const output = ctx.createImageData(canvas.width, canvas.height);
            const outPixels = output.data;

            for (let i = 0; i < mask.length; i++) {
                if (mask[i]) {
                    // Person pixel: keep the original RGBA values.
                    const p = i * 4;
                    outPixels[p]     = framePixels[p];       // R
                    outPixels[p + 1] = framePixels[p + 1];   // G
                    outPixels[p + 2] = framePixels[p + 2];   // B
                    outPixels[p + 3] = framePixels[p + 3];   // A
                }
            }

            ctx.putImageData(output, 0, 0);

            requestAnimationFrame(segmentBody); // repeat for real-time effect
        }

        // Start the demo on click: load the model, open the camera, size the
        // canvas to the real video dimensions, then enter the segmentation loop.
        document.getElementById('startButton').addEventListener('click', async (event) => {
            // Disable the button immediately: a second click would reload the
            // model and start a second, competing requestAnimationFrame loop.
            const button = event.currentTarget;
            button.disabled = true;
            try {
                await initializeBodyPix();
                await startVideo();
                await video.play();

                // videoWidth/videoHeight are populated once play() resolves.
                canvas.width = video.videoWidth;
                canvas.height = video.videoHeight;

                segmentBody();
            } catch (error) {
                console.error('Failed to start body segmentation:', error);
                button.disabled = false; // allow a retry after failure
            }
        });

    </script>
</body>
</html>