import * as tfconv from '@tensorflow/tfjs-converter';
import * as tf from '@tensorflow/tfjs-core';

// Anchor-generation spec for the model's two SSD output layers:
// stride 8 with 2 anchors per grid cell, and stride 16 with 6.
const ANCHORS_CONFIG = {
	'strides': [8, 16],
	'anchors': [2, 6]
};

// `NUM_LANDMARKS` is a fixed property of the model.
// Number of facial keypoints predicted per detection.
const NUM_LANDMARKS = 6;

// 生成锚点，传入图片大小和锚点配置，输出锚点二维数组，数组形状是896*2
/**
 * Builds the SSD anchor grid for the given input size and anchor spec.
 * For each stride, one anchor center is placed at the middle of every
 * grid cell and repeated `anchors[layer]` times. For a 128x128 input
 * with the default config this yields 896 rows (16*16*2 + 8*8*6).
 * @param {number} width - model input width in pixels.
 * @param {number} height - model input height in pixels.
 * @param {{strides: number[], anchors: number[]}} outputSpec - per-layer
 *   stride and anchors-per-cell counts.
 * @returns {number[][]} array of [x, y] anchor centers.
 */
function generateAnchors(width, height, outputSpec) {
	const result = [];
	outputSpec.strides.forEach((stride, layer) => {
		// Number of grid cells covering each dimension (ceil division).
		const rows = Math.ceil(height / stride);
		const cols = Math.ceil(width / stride);
		const repeats = outputSpec.anchors[layer];
		for (let row = 0; row < rows; row++) {
			const centerY = stride * (row + 0.5);
			for (let col = 0; col < cols; col++) {
				const centerX = stride * (col + 0.5);
				for (let k = 0; k < repeats; k++) {
					result.push([centerX, centerY]);
				}
			}
		}
	});
	return result;
}

/**
 * Decodes the model's raw box regression into corner coordinates.
 * Columns 1-2 of `boxOutputs` are center offsets relative to the anchor,
 * columns 3-4 are box width/height. The result is one
 * [xmin, ymin, xmax, ymax] row per anchor, in model-input pixel space.
 * @param {tf.Tensor2D} boxOutputs - raw prediction, one row per anchor.
 * @param {tf.Tensor2D} anchors - [x, y] anchor centers.
 * @returns {tf.Tensor2D} corner boxes, shape [numAnchors, 4].
 */
function decodeBounds(boxOutputs, anchors) {
	const centerOffsets = tf.slice(boxOutputs, [0, 1], [-1, 2]);
	const centers = tf.add(centerOffsets, anchors);

	const sizes = tf.slice(boxOutputs, [0, 3], [-1, 2]);
	const halfSizes = tf.div(sizes, 2);

	const topLeft = tf.sub(centers, halfSizes);
	const bottomRight = tf.add(centers, halfSizes);
	return tf.concat2d([topLeft, bottomRight], 1);
}

// blazeface模型输出的未归一化的人脸信息
// Wraps the BlazeFace graph model: preprocessing, inference, and decoding
// of the raw outputs into faces in original-image coordinates.
export class BlazeFaceModel {

	/**
	 * @param model - loaded BlazeFace graph model (tfjs GraphModel).
	 * @param {number} width - model input width (e.g. 128).
	 * @param {number} height - model input height (e.g. 128).
	 * @param {number} maxFaces - maximum number of faces to return.
	 * @param {number} iouThreshold - NMS intersection-over-union threshold.
	 * @param {number} scoreThreshold - minimum confidence to keep a box.
	 */
	constructor(model, width, height, maxFaces, iouThreshold, scoreThreshold) {
		this.blazeFaceModel = model;
		this.width = width;
		this.height = height;
		this.maxFaces = maxFaces;
		this.anchorsData = generateAnchors(
			width, height,
			ANCHORS_CONFIG);
		this.anchors = tf.tensor2d(this.anchorsData);

		this.iouThreshold = iouThreshold;
		this.scoreThreshold = scoreThreshold;

		// Crop bookkeeping filled in by makeSquare(); used by postprocess()
		// to map detections back into original-image coordinates.
		this.offsetX = 0;
		this.offsetY = 0;
		this.scaleFactor = 1;
	}

	/**
	 * Center-crops the image to a square, recording the crop offset and
	 * the scale between the crop and the model input size.
	 * @param {tf.Tensor3D} image - [height, width, channels] tensor.
	 * @returns {tf.Tensor3D} the square center crop.
	 */
	makeSquare(image) {
		const start = [0, 0, 0];
		const size = [-1, -1, -1];
		const [height, width] = image.shape;
		// Reset both offsets: each branch below sets only one of them, and a
		// stale value from a previous call with the other orientation would
		// corrupt the coordinate mapping in postprocess().
		this.offsetX = 0;
		this.offsetY = 0;
		if (height > width) {
			// Portrait: trim top and bottom. Floor so tf.slice gets an
			// integer begin index when the trimmed amount is odd.
			this.offsetY = start[0] = Math.floor((height - width) / 2);
			size[0] = width;
			this.scaleFactor = width / this.width;
		} else {
			// Landscape (or already square): trim left and right.
			this.offsetX = start[1] = Math.floor((width - height) / 2);
			size[1] = height;
			this.scaleFactor = height / this.height;
		}
		return tf.slice(image, start, size);
	}

	/**
	 * Converts raw pixels into the normalized 4-D float tensor the model
	 * expects.
	 * @param image - pixel source accepted by tf.browser.fromPixels
	 *   (e.g. {data, width, height}).
	 * @returns {Promise<tf.Tensor4D>} [1, height, width, 3] in [-1, 1].
	 */
	async preprocess(image) {
		return tf.tidy(() => {
			// uint8 pixel data -> 3-D tensor
			const tensor3dImage = tf.browser.fromPixels(image);
			// Center square crop (records offset/scale for postprocess)
			const squareImage = this.makeSquare(tensor3dImage);
			// Resize to the model input. Note: resizeBilinear takes the new
			// size as [newHeight, newWidth].
			const resizedImage = tf.image.resizeBilinear(squareImage,
				[this.height, this.width]);
			// Add a batch dimension to match the model's input signature
			const tensor4dImage = tf.expandDims(resizedImage, 0);
			// Normalize from [0, 255] to [-1, 1]:
			// int[0,255] -cast-> float[0,255] -div-> float[0,2] -sub-> float[-1,1]
			const normalizedImage = tf.sub(tf.div(tf.cast(tensor4dImage, 'float32'), 127.5), 1);
			return normalizedImage;
		});
	}

	/**
	 * Decodes the raw model output into faces in original-image coordinates.
	 * Disposes `res` and all intermediate tensors.
	 * @param {tf.Tensor} res - raw batched prediction from the model.
	 * @returns {Promise<Array<{topLeft, bottomRight, landmarks, probability}>>}
	 */
	async postprocess(res) {
		// Decode corner boxes and per-anchor confidences.
		const [outputs, boxes, scores] = tf.tidy(() => {
			const prediction = tf.squeeze(res);
			const decodedBounds = decodeBounds(prediction, this.anchors);
			const logits = tf.slice(prediction, [0, 0], [-1, 1]);
			const confidences = tf.squeeze(tf.sigmoid(logits));
			return [prediction, decodedBounds, confidences];
		});
		// The raw prediction tensor is fully consumed above; dispose it here
		// so it does not leak (the caller hands ownership to postprocess).
		res.dispose();

		// Non-max suppression has no sync variant, so it runs outside tidy.
		const indicesTensor = await tf.image.nonMaxSuppressionAsync(
			boxes, scores, this.maxFaces, this.iouThreshold, this.scoreThreshold);
		const indices = indicesTensor.arraySync();

		// Gather the surviving boxes/landmarks/scores and map box corners
		// back to original-image coordinates (undo crop scale and offset).
		const [topLefts, bottomRights, score, landmarks] = tf.tidy(() => {
			const suppressBox = tf.gather(boxes, indicesTensor);
			const cropOffset = tf.tensor1d([this.offsetX, this.offsetY]);

			const boxStarts = tf.slice(suppressBox, [0, 0], [-1, 2]);
			const startNormalized = tf.mul(boxStarts, this.scaleFactor);
			const topLefts = tf.add(startNormalized, cropOffset);

			const boxEnds = tf.slice(suppressBox, [0, 2], [-1, 2]);
			const endNormalized = tf.mul(boxEnds, this.scaleFactor);
			const bottomRights = tf.add(endNormalized, cropOffset);

			const suppressScore = tf.gather(scores, indicesTensor);
			const suppressOutput = tf.gather(outputs, indicesTensor);

			// Columns 5.. hold the anchor-relative landmark coordinates,
			// NUM_LANDMARKS (x, y) pairs per detection.
			const temp = tf.slice(suppressOutput, [0, 5], [-1, -1]);
			const landmarks = tf.reshape(temp, [-1, NUM_LANDMARKS, 2]);

			return [topLefts.arraySync(), bottomRights.arraySync(), suppressScore.arraySync(), landmarks
				.arraySync()
			];
		});

		// Dispose the tensors kept alive across the await to avoid leaks.
		outputs.dispose();
		boxes.dispose();
		scores.dispose();
		indicesTensor.dispose();

		// Decode landmarks (anchor-relative -> original-image coordinates)
		// and package each detection as a plain JS object.
		const normalizedFaces = [];
		for (let i = 0; i < indices.length; i++) {
			const anchor = this.anchorsData[indices[i]];
			const normalizedLandmark = landmarks[i].map((landmark) => ([
				(landmark[0] + anchor[0]) * this.scaleFactor + this.offsetX,
				(landmark[1] + anchor[1]) * this.scaleFactor + this.offsetY
			]));
			normalizedFaces.push({
				topLeft: topLefts[i],
				bottomRight: bottomRights[i],
				landmarks: normalizedLandmark,
				probability: score[i]
			});
		}
		return normalizedFaces;
	}

	/**
	 * Runs the full detection pipeline on raw pixel data.
	 * @param image - raw pixel buffer (e.g. Uint8Array of RGBA data).
	 * @param {number} width - source image width in pixels.
	 * @param {number} height - source image height in pixels.
	 * @returns {Promise<Array>} detected faces with boxes, landmarks, scores.
	 */
	async estimateFaces(image, width, height) {
		const preprocessImage = await this.preprocess({
			data: image,
			width,
			height
		});
		const batchedPrediction = await this.blazeFaceModel.predict(preprocessImage);
		preprocessImage.dispose();
		return this.postprocess(batchedPrediction);
	}
}
