import { createApp } from 'vue'
import App from './App.vue'
import router from './router'
import store from './store'

import Antd from 'ant-design-vue'
import './style/base.scss'
import 'normalize.css'
import 'ant-design-vue/dist/antd.css'
import * as FaceDetectObj from './model/index.js';

// Pull the detector class and the image-loading helper out of the model module.
const { FaceDetector, createImage } = FaceDetectObj;

// Single shared detector instance for the whole app, configured with the
// 68-point facial-landmark localization model.
const faceDetector: any = new FaceDetector(undefined, [
    ['landmark68', {modelPath: './face_landmark_localization/model.json'}],
]);

/**
 * 封装一个画布，检测图片的结果将显示在这个画布中
 */
/**
 * Wraps the detection pipeline around two canvases: faces are detected and
 * annotated on an offscreen canvas at the image's natural resolution, then
 * the annotated result is scaled (centered, aspect-preserving) into the
 * on-page display canvas.
 */
class FaceDetectCanvas {
    // Project face-detection model wrapper; its concrete type lives in
    // ./model/index.js, so it is typed loosely here.
    private faceDetector: any;
    // Offscreen canvas; resized to the source image's natural dimensions on
    // every detection run so annotations are drawn pixel-accurately.
    private resCanvas: HTMLCanvasElement = document.createElement('canvas');
    constructor() {
        this.faceDetector = faceDetector;
    }

    /**
     * Detect faces in an image and render the annotated result.
     * @param canvasEle: the on-page canvas that shows the final result
     * @param imageURL: URL of the image to run detection on
     */
    async startDetectImage(canvasEle: HTMLCanvasElement, imageURL: string) {
        const imgEle: HTMLImageElement = await createImage(imageURL);
        const resCtx: CanvasRenderingContext2D = this.resCanvas.getContext('2d') as CanvasRenderingContext2D;
        const finalCtx: CanvasRenderingContext2D = canvasEle.getContext('2d') as CanvasRenderingContext2D;
        const { naturalWidth, naturalHeight } = imgEle;
        this.resCanvas.width = naturalWidth;
        this.resCanvas.height = naturalHeight;

        // Detect at the image's natural size and draw the annotations on the
        // offscreen canvas before any scaling happens.
        const img_bitmap = await createImageBitmap(imgEle);
        const res = await this.faceDetector.detect(img_bitmap, {threshold: 0.72});
        resCtx.drawImage(imgEle, 0, 0, naturalWidth, naturalHeight);
        this.drawRes(res, resCtx);

        // Scale the offscreen canvas into the on-page canvas, centered and
        // letterboxed. Clear first, otherwise successive images stack up.
        const {x: imgX, y: imgY, w: imgWidth, h: imgHeight} = this.calcImageInfo(canvasEle.width, canvasEle.height, naturalWidth,
            naturalHeight);
        finalCtx.clearRect(0, 0, canvasEle.width, canvasEle.height);
        finalCtx.drawImage(this.resCanvas, imgX, imgY, imgWidth, imgHeight);
    }

    /**
     * Draw detection results: bounding box, confidence label, and the 68
     * facial landmarks for each detected face.
     * @param data: detection results (array of {location, face_probability, landmark68})
     * @param ctx: 2D context of the canvas to draw on
     */
     drawRes(data: any, ctx: CanvasRenderingContext2D) {
        ctx.fillStyle = ctx.strokeStyle = 'green';
        // BUG FIX: '20px' alone is not a valid CSS font shorthand (a family
        // is required), so the original assignment was silently ignored and
        // the label rendered at the default font size.
        ctx.font = '20px sans-serif';
        ctx.lineWidth = 2;
        data.forEach((item: any) => {
            const x = item.location.left;
            const y = item.location.top;
            const w = item.location.width;
            const h = item.location.height;
            ctx.strokeRect(x, y, w, h);
            ctx.fillText(item.face_probability.toFixed(6), x, y);
            item.landmark68.forEach(({ x: lx, y: ly }: any) => {
                // BUG FIX: start a fresh path per landmark. The original
                // accumulated every moveTo+arc on one path and called fill()
                // each iteration, re-filling all previous circles (O(n^2)
                // fills) and filling a moveTo sliver into each subpath.
                ctx.beginPath();
                ctx.arc(lx, ly, 2, 0, 2 * Math.PI);
                ctx.fill();
            });
        });
    }


    /**
     * Compute the letterboxed draw size and the centered top-left position
     * for fitting an image of imgW x imgH into a canvas of ctxX x ctxY while
     * preserving the image's aspect ratio.
     * @param ctxX: canvas width
     * @param ctxY: canvas height
     * @param imgW: image width
     * @param imgH: image height
     * @return obj: draw width/height (w, h) and centered position (x, y)
     */
    calcImageInfo (ctxX: number, ctxY: number, imgW: number, imgH: number) {
        let obj = {
            x: 0,  // top-left x of the drawn image
            y: 0,  // top-left y of the drawn image
            w: 0,  // drawn image width
            h: 0   // drawn image height
        }
        if (ctxX / ctxY > imgW / imgH) {
            // Canvas is proportionally wider than the image: fit to height,
            // clamping the scaled width to the canvas width.
            obj.w = Math.min(Math.ceil(imgW * (ctxY / imgH)), ctxX)
            obj.h = ctxY
        }
        else {
            // Canvas is proportionally taller (or equal): fit to width,
            // clamping the scaled height to the canvas height.
            obj.w = ctxX
            obj.h = Math.min(Math.ceil(imgH * (ctxX / imgW)), ctxY)
        }
        obj.x = Math.round((ctxX - obj.w) / 2)
        obj.y = Math.round((ctxY - obj.h) / 2)
        return obj
    }

}

// Bootstrap the Vue application and expose a single shared FaceDetectCanvas
// to all components via inject('$faceDetectCanvas').
const app = createApp(App);
const faceDetectCanvas = new FaceDetectCanvas();
app.provide('$faceDetectCanvas', faceDetectCanvas);

// Register plugins (app.use returns the app, so chained and sequential
// registration are equivalent), then mount.
app.use(store);
app.use(router);
app.use(Antd);
app.mount('#app');
