<template>
	<view class="container">
		<view class="tips">
		<view style="font-size: 35rpx;">请确认本人操作</view>
			<view style="color: #bdbdbd; font-size: 30rpx; padding: 50rpx;">请保持正脸在取景框中根据屏幕指示完成识别</view>
		</view>
		<view style="position: relative;">
			<!-- #ifdef MP-WEIXIN -->
			<!-- <canvas id="mark-canvas" class="mark-canvas" type="2d"></canvas> -->
			<camera device-position="front" flash="off" binderror="error" frame-size="small"></camera>
			<cover-view class="face-cover" :class="andriodStyle">
				<cover-view class="face-tips">
					{{result}}
				</cover-view>
			</cover-view>
			<!-- #endif -->
		</view>
	</view>
</template>

<script>
	import * as blazeface from '@/model/index'
	var _model = null;
	let listener;
	export default {
		data() {
			// Reactive state for the face-recognition page.
			return {
				result: 'tips', // Status text shown in the camera cover overlay
				modelUrl: 'http://192.168.1.110:8081/model.json', // BlazeFace model location (dev host) — NOTE(review): hard-coded LAN address
				cxt: {}, // Camera context, created in startTacking()
				andriodStyle: '', // Extra class applied on Android / devtools for circular clipping
				speedMaxCount: 10, // Run detection on one camera frame out of every `speedMaxCount`

				/* The fields below support debugging and frame-to-canvas coordinate mapping */
				canvasCtx: {}, // 2D context of the (optional) debug mark canvas
				offsetX: 0, // Horizontal centering offset when mapping frame coords to canvas coords
				offsetY: 0, // Vertical centering offset when mapping frame coords to canvas coords
				factor: 0, // Frame-to-canvas scale factor; 0 means "not computed yet"
				canvasWidth: 250, // Logical canvas width
				canvasHeight: 250, // Logical canvas height
			}
		},
		async onReady() {
			this.loadmodel(this.modelUrl);
			this.startTacking();
			// 画布初始化，用于调试，与业务
			// this.initCanvas()
		},
		onShow() {
			let that = this
			switch (uni.getSystemInfoSync().platform) {
				case 'android':
					console.log('运行Android上')
					that.andriodStyle = ' android '
					break;
				case 'ios':
					console.log('运行iOS上,这里会有一个样式问题')
					break;
				default:
					console.log('运行在开发者工具上')
					that.andriodStyle = ' android '
					break;
			}
		},
		onUnload() {
			this.stopTacking()
		},
		methods: {
			startTacking() {
				if (!listener) {
					const context = uni.createCameraContext();
					this.cxt = context;
					var count = 0;
					// #ifdef MP-WEIXIN
					listener = context.onCameraFrame(async frame => {
						// 每秒60帧，这里控制每0.02获取一次图片
						count++;
						if (count === this.speedMaxCount) {
							const res = await this.detectFace(frame);

							// 计算偏离值和缩放比
							if (this.factor === 0) {
								if (frame.height > frame.width) {
									this.factor = this.canvasWidth / frame.width
									this.offsetY = (frame.height * this.factor - this.canvasHeight) / 2
								} else {
									this.factor = this.canvasHeight / frame.height
									this.offsetX = (frame.width * this.factor - this.canvasWidth) / 2
								}
							}
							// DEBUG： 调试使用建议保留
							// this.clearMarkCanvas();
							// this.drawFace(res);
							this.showDetectInfo(res);
							count = 0;
						}
					});
					// #endif
				}
				listener.start();
				console.log('startTacking', 'listener is start');
			},
			stopTacking() {
				if (listener) {
					listener.stop();
				}
			},
			/**
			 * 人脸模型数据载入
			 */
			async loadmodel(modelUrl) {
				const model = await blazeface.load({
					maxFaces: 3,
					modelUrl: modelUrl
				});
				this.result = '载入中...'
				_model = model;
				this.result = '模块加载完成'
			},
			async detectFace(frame) {
				if (_model) {
					const res = await _model.estimateFaces(new Uint8Array(frame.data),
						frame.width, frame.height);
					return res
				} else {
					console.error("模型未加载");
					return null;
				}
			},
			// Take a photo once the face is fully inside the frame.
			tackPhoto() {
				const camera = this.cxt // camera context created in startTacking()
				camera.takePhoto({
					quality: 'high', // 'high' | 'normal' | 'low' image quality
					success: (res) => {
						// Stop the frame listener while the photo is processed
						this.stopTacking()
						this.result = "人脸识别中";
						const photoSrc = res.tempImagePath
						console.log("photoSrc: ", photoSrc);
						// TODO: run face verification on photoSrc here.
						// If verification fails, restart the listener, e.g.:
						// setTimeout(()=> {this.startTacking(),20 *1000})
					},
					fail: (res) => {
						console.log("拍照失败res: ", res);
					}
				})
			},
			showDetectInfo(res) {
				console.log("res: ",res);
				if (res.length > 1) {
					this.result = "靠近一点"
				} else if (res.length === 1) {
					if(this.listenFullFace(res)){
						this.result = "眨眨眼";
						this.tackPhoto();
					}else {
						this.result = "把脸移入圈内";
					}
				} else {
					this.result = "没有检测到脸";
				}
			},
			/**
			 * 检测全脸的存在
			 * @param {Object} res 人脸特征信息
			 */
			listenFullFace(res) {
				const FACE_POINT_COUNTS = 6;
				let faceKeyPoints = res.map(face => {
					let arrays = []
					// 画关键点
					const landmarks = face.landmarks
					for (let i = 0; i < 6; ++i) {
						const x = 0, y = 1;
						const point = this.transformPoint([landmarks[i][x], landmarks[i][y]])
						if (point[x] >= 0 && point[x] <= this.canvasWidth && point[y] >= 0 && point[y] <= this.canvasHeight )
						arrays.push(point)
					}
					return arrays
				})
				
				return faceKeyPoints[0]?.length === FACE_POINT_COUNTS;
			},


			/**
			 * 初始化画布
			 */
			initCanvas() {
				const query = wx.createSelectorQuery()
				query.select('#mark-canvas')
					.fields({
						node: true,
						size: true
					})
					.exec((res) => {
						const canvas = res[0].node
						const ctx = canvas.getContext('2d')

						const systemInfo = wx.getSystemInfoSync()
						const dpr = systemInfo.pixelRatio
						const screenWidth = systemInfo.screenWidth
						canvas.width = res[0].width * dpr
						canvas.height = res[0].height * dpr
						const scaleFactor = screenWidth * dpr / 375
						ctx.scale(scaleFactor, scaleFactor)

						ctx.lineWidth = 3
						ctx.strokeStyle = 'red'
						ctx.fillStyle = 'yellow'

						this.canvasCtx = ctx
					})
			},
			/**
			 * 清除画布
			 */
			clearMarkCanvas() {
				this.canvasCtx.clearRect(0, 0, this.canvasWidth, this.canvasHeight)
			},
			/**
			 * 绘制人脸检测
			 * @param {Object} res 人脸信息
			 */
			drawFace(res) {
				res.map(face => {
					// 画关键点
					const landmarks = face.landmarks
					for (let i = 0; i < 6; ++i) {
						const point = this.transformPoint([landmarks[i][0], landmarks[i][1]])
						this.canvasCtx.fillRect(point[0], point[1], 6, 6)
					}

					// 画预测框
					const start = this.transformPoint(face.topLeft)
					const end = this.transformPoint(face.bottomRight);
					const size = [end[0] - start[0], end[1] - start[1]];

					this.canvasCtx.strokeRect(start[0], start[1], size[0], size[1]);
				})
			},
			/**
			 * 位置信息按比例转换缩放
			 * @param {Object} point 位置
			 */
			transformPoint(point) {
				const x = point[0] * this.factor - this.offsetX
				const y = point[1] * this.factor - this.offsetY
				return [x, y]
			},
		}
	}
</script>

<style>
	/* 调试使用 */
	/* .mark-canvas {
		position: absolute;
		width: 500rpx;
		height: 500rpx;
		z-index: 10;
	} */

	camera {
		width: 500rpx;
		height: 500rpx;
		border-radius: 50%;
		z-index: 0;
	}

	.face-cover {
		position: absolute;
		top: 0rpx;
		width: 500rpx;
		height: 500rpx;
	}

	.android {
		border-radius: 50%;
		overflow: hidden;
	}

	.face-tips {
		padding-top: 40rpx;
		text-align: center;
		width: 500rpx;
		height: 80rpx;
		font-size: 36rpx;
		color: #fff;
		background-color: rgba(0, 0, 0, 0.4);
	}

	.tips {
		margin: 30rpx;
		display: flex;
		flex-direction: column;
		align-items: center;
	}

	.container {
		display: flex;
		flex-direction: column;
		align-items: center;
	}
</style>
