import * as faceapi from "face-api.js";

// Entry point. `main` is async; attach a rejection handler so a failure
// while loading model weights or running detection does not surface as an
// unhandled promise rejection.
main().catch((err) => console.error('face matching failed:', err));

/**
 * Loads the face-api.js models, extracts a reference face descriptor from
 * #image1, detects all faces in #image2, and draws a labelled box on the
 * #image2Overlay canvas showing each face's best match against the reference.
 *
 * Side effects: mutates the overlay canvas, logs intermediate results, and
 * calls the page-level `closeLoading()` helper when drawing completes.
 */
async function main() {
    const WEIGHTS_URI = 'http://127.0.0.1:8888/weights';

    // Load the model weights required by the pipeline below: SSD MobileNet V1
    // (detection), 68-point landmarks, and the recognition net (descriptors).
    // The previous duplicate loads (loadFaceLandmarkModel /
    // loadFaceRecognitionModel re-load the same two nets) and the unused
    // tinyFaceDetector load have been removed.
    await faceapi.nets.ssdMobilenetv1.loadFromUri(WEIGHTS_URI);
    await faceapi.nets.faceLandmark68Net.loadFromUri(WEIGHTS_URI);
    await faceapi.nets.faceRecognitionNet.loadFromUri(WEIGHTS_URI);

    const image1 = document.getElementById('image1');
    const image2 = document.getElementById('image2');

    // Shared detector options: SSD MobileNet V1 with a 0.5 confidence floor.
    const options = new faceapi.SsdMobilenetv1Options({ minConfidence: 0.5 });

    // Reference face: single face + landmarks + 128-d descriptor from image1.
    // Fix: pass `options` so the reference detection uses the same detector
    // settings as the query detection (it previously ignored them).
    const reference = await faceapi
        .detectSingleFace(image1, options)
        .withFaceLandmarks()
        .withFaceDescriptor();
    console.log(reference);
    if (!reference) {
        return;
    }

    // Query faces: every face in image2, with landmarks and descriptors.
    const results = await faceapi
        .detectAllFaces(image2, options)
        .withFaceLandmarks()
        .withFaceDescriptors();
    console.log(results);
    if (!results.length) {
        return;
    }

    // Matcher seeded with the reference descriptor; later descriptors are
    // compared against it to find the best match.
    const faceMatcher = new faceapi.FaceMatcher(reference);

    // Overlay canvas that sits on top of image2.
    // Fix: size the canvas to image2 — it was previously matched to image1,
    // which misaligned the drawn boxes whenever the two images differ in size.
    const canvas = document.getElementById('image2Overlay');
    faceapi.matchDimensions(canvas, image2);

    // Scale detections to the display size of image2, then draw a labelled
    // box per face showing its similarity verdict against the reference.
    const resizedResults = faceapi.resizeResults(results, image2);
    resizedResults.forEach(({ detection, descriptor }) => {
        const label = faceMatcher.findBestMatch(descriptor).toString();
        const drawBox = new faceapi.draw.DrawBox(detection.box, { label });
        drawBox.draw(canvas);
    });

    // NOTE(review): `closeLoading` is not defined in this file — presumably a
    // page-level loading-spinner helper; confirm it exists globally.
    closeLoading();
}