#include <QPainter>
#include <glm/gtx/transform.hpp>
#include "Visualize.h"
#include "QFaceTracker.h"


// Factory helper: heap-allocates a tracker configured for the given
// color and depth camera setups.  The caller owns the returned pointer.
QFaceTracker * QFaceTracker::createInstance (const CameraConfig &colorConfig,
		const CameraConfig &depthConfig)
{
	QFaceTracker *pTracker = new QFaceTracker (colorConfig, depthConfig);
	return pTracker;
}

// Construct a tracker: zero every SDK handle first, create the face
// clipper, then build the Kinect Face Tracking pipeline via _init().
QFaceTracker::QFaceTracker (const CameraConfig &colorConfig,
		const CameraConfig &depthConfig)
		: _pFaceTracker (NULL)
		, _pFTResult (NULL)
		, _pFTColorImage (NULL)
		, _pFTDepthImage (NULL)
		, _isLastTrackSucceeded (false)
		, _pFaceClipper (new QFaceClipper ())
{
	_init (colorConfig, depthConfig);
}

// Release every COM-style face-tracking object and the owned clipper.
// Each pointer is nulled right after it is freed so the destructor is
// safe even if a handle was never created.
QFaceTracker::~QFaceTracker ()
{
	if (_pFaceTracker) {
		_pFaceTracker->Release ();
		_pFaceTracker = NULL;
	}
	if (_pFTColorImage) {
		_pFTColorImage->Release ();
		_pFTColorImage = NULL;
	}
	if (_pFTDepthImage) {
		_pFTDepthImage->Release ();
		_pFTDepthImage = NULL;
	}
	if (_pFTResult) {
		_pFTResult->Release ();
		_pFTResult = NULL;
	}
	delete _pFaceClipper;	// deleting NULL is a no-op
	_pFaceClipper = NULL;
}

// Expose the tracker's result object.  The pointer stays owned by this
// class; callers must not Release() it.
IFTResult *QFaceTracker::GetResult ()
{
	return _pFTResult;
}

// True when the most recent track() call succeeded.
bool QFaceTracker::isFaceTracked () const
{
	return _isLastTrackSucceeded;
}

// Run one face-tracking step on the given frames.
// @param colorImage  color frame; must be a format supported by
//                    _attachQImageToIFTImage (RGB32 or RGB16).
// @param depthImage  optional depth frame; may be a null QImage.
// @param hint3DArr   optional 3D hint points forwarded verbatim to the
//                    FT SDK (cast to FT_VECTOR3D*); may be NULL.
// @return true when this frame was tracked successfully.
bool QFaceTracker::track (QImage &colorImage, QImage &depthImage,
		glm::vec3 *hint3DArr)
{
	HRESULT hrFT = E_UNEXPECTED;

	// Deep-copy the frames: IFTImage::Attach references the QImage's
	// pixel buffer, so the data must outlive this call (the color copy
	// is also reused later by getFace()).
	_colorImage = colorImage.copy ();
	_depthImage = depthImage.copy ();
	if (!_colorImage.isNull ()) {
		_attachQImageToIFTImage (_colorImage, _pFTColorImage);
		if (!depthImage.isNull ())
			_attachQImageToIFTImage (_depthImage, _pFTDepthImage);
		FT_SENSOR_DATA sensorData (_pFTColorImage,
				_depthImage.isNull () ? NULL : _pFTDepthImage);

		// Continue from the previous pose when the last frame
		// succeeded; otherwise run a full detection pass.
		if (_isLastTrackSucceeded)
			hrFT = _pFaceTracker->ContinueTracking (&sensorData,
					(FT_VECTOR3D *)hint3DArr, _pFTResult);
		else
			hrFT = _pFaceTracker->StartTracking (&sensorData, NULL,
					(FT_VECTOR3D *)hint3DArr, _pFTResult);
	}

	// Success requires both the call itself and the result status to
	// report success (short-circuit skips GetStatus on a failed call).
	_isLastTrackSucceeded = SUCCEEDED (hrFT) &&
			SUCCEEDED (_pFTResult->GetStatus ());
	if (!_isLastTrackSucceeded)
		_pFTResult->Reset ();
	return _isLastTrackSucceeded;
}

// Draw tracking overlays onto `image`: green dots for a subset of the
// tracked 2D shape points and a red rectangle around the face.
// Does nothing while no face is tracked.
void QFaceTracker::visualize (QImage &image)
{
	if (!isFaceTracked ())
		return;
	QPainter painter (&image);

	FT_VECTOR2D *shapePoints;
	UINT shapePointsCount;
	if (SUCCEEDED (_pFTResult->Get2DShapePoints (&shapePoints,
			&shapePointsCount))) {
		painter.setPen (QPen (QBrush (qRgb (0, 255, 0)), 3.0));
		// NOTE(review): starts at index 87, skipping the first 87
		// shape points — presumably only the trailing point set is
		// wanted; confirm against the FT SDK's Get2DShapePoints
		// point layout.
		for (UINT i = 87; i < shapePointsCount; ++i) {
			painter.drawPoint (QPointF (shapePoints[i].x, shapePoints[i].y));
		}
	}

	RECT rectFace;
	if (SUCCEEDED (_pFTResult->GetFaceRect (&rectFace))) {
		painter.setPen (QPen (QBrush (qRgb (255, 0, 0)), 1.0));
		painter.drawRect (rectFace.left, rectFace.top,
				rectFace.right - rectFace.left, rectFace.bottom - rectFace.top);
	}
}

// Build a textured, normalized 3D face model from the most recent
// successful tracking result.
// @param modelTexture  image stored in the returned Model as its
//                      texture; the projected shape points become its
//                      normalized [0,1] texture coordinates.
// @return a populated Model, or a default-constructed Model on any
//         failure (no face tracked, SDK call failed, mesh too small).
Model QFaceTracker::get3DModel (QImage &modelTexture) const
{
	if (!isFaceTracked ())
		return Model ();

	Model m;
	IFTModel *ftModel;

	if (!SUCCEEDED (_pFaceTracker->GetFaceModel (&ftModel)))
		return Model ();

	// pSU and pAU point into tracker/result-owned storage and must not
	// be freed here.  ftModel, however, must be Released on EVERY path
	// — the original code leaked it on each early return below.
	FLOAT *pSU = NULL;
	UINT numSU;
	BOOL suConverged;
	FLOAT *pAU;
	UINT numAU;
	if (!SUCCEEDED (_pFaceTracker->GetShapeUnits (NULL, &pSU, &numSU,
			&suConverged)) ||
			!SUCCEEDED (_pFTResult->GetAUCoefficients (&pAU, &numAU))) {
		ftModel->Release ();
		return Model ();
	}

	// Fetch the shape with an identity transform first so the mesh can
	// be normalized in model space.
	FLOAT scale = 1.0f;
	FLOAT rotationXYZ[3] = { 0.0f, 0.0f, 0.0f };
	FLOAT translationXYZ[3] = { 0.0f, 0.0f, 0.0f };

	POINT viewOffset = { 0, 0 };

	UINT vertexCount = ftModel->GetVertexCount ();
	// Vertices 8, 16 and 49 are dereferenced for normalization below;
	// bail out defensively if the SDK ever reports a smaller mesh.
	if (vertexCount < 50) {
		ftModel->Release ();
		return Model ();
	}
	m.vertices.resize (vertexCount);
	m.texCoords.resize (vertexCount);
	if (!SUCCEEDED (ftModel->Get3DShape (pSU, numSU, pAU, numAU, scale,
			rotationXYZ, translationXYZ, (FT_VECTOR3D*)&m.vertices[0],
			vertexCount))) {
		ftModel->Release ();
		return Model ();
	}
	// Center the head vertically and scale it to unit height using
	// vertices 16/49 (top) and 8 (bottom) — presumably forehead and
	// chin landmarks; Z is negated to flip handedness.
	float topY = glm::max (m.vertices[16].y, m.vertices[49].y);
	float bottomY = m.vertices[8].y;
	glm::mat4 modelTranslate = glm::translate (0.0f, -(topY + bottomY) / 2.0f, 0.0f);
	glm::mat4 modelScale = glm::scale (1.0f / (topY - bottomY),
			1.0f / (topY - bottomY), -1.0f / (topY - bottomY));
	for (size_t i = 0; i < m.vertices.size (); ++i) {
		m.vertices[i] = (modelScale * modelTranslate *
				glm::vec4 (m.vertices[i], 1.0f)).swizzle (
						glm::X, glm::Y, glm::Z);
	}
	// Project the posed shape into the color camera to obtain texture
	// coordinates, then normalize them to [0, 1].
	if (!SUCCEEDED (_pFTResult->Get3DPose (&scale, rotationXYZ,
			translationXYZ))) {
		ftModel->Release ();
		return Model ();
	}
	if (!SUCCEEDED (ftModel->GetProjectedShape (&_fTColorConfig, 1.0f,
			viewOffset, pSU, numSU, pAU, numAU, scale, rotationXYZ,
			translationXYZ, (FT_VECTOR2D*)&m.texCoords[0], vertexCount))) {
		ftModel->Release ();
		return Model ();
	}
	for (size_t i = 0; i < m.texCoords.size (); ++i) {
		m.texCoords[i].x = m.texCoords[i].x / (float)_fTColorConfig.Width;
		m.texCoords[i].y = m.texCoords[i].y / (float)_fTColorConfig.Height;
	}

	FT_TRIANGLE* pTriangles;
	UINT triangleCount;
	if (!SUCCEEDED (ftModel->GetTriangles (&pTriangles, &triangleCount))) {
		ftModel->Release ();
		return Model ();
	}
	m.indices.resize (triangleCount);
	memcpy (&m.indices[0], pTriangles, triangleCount * sizeof (FT_TRIANGLE));
	// Swap the first and last index of each triangle — presumably to
	// reverse the winding order for the renderer; confirm.
	for (size_t i = 0; i < m.indices.size (); ++i) {
		glm::uvec3 temp = m.indices[i];
		m.indices[i] = glm::uvec3 (temp.z, temp.y, temp.x);
	}
	ftModel->Release ();

	m.texture = modelTexture;

	return m;
}

// Crop the currently tracked face out of `image` using the projected
// 3D model; returns a null QImage when no face is tracked.
QImage QFaceTracker::clipFace (QImage &image)
{
	if (!isFaceTracked ())
		return QImage ();
	return _pFaceClipper->clipFace (get3DModel (image));
}

// Convenience wrapper: clip the face from the color frame captured by
// the most recent track() call.
QImage QFaceTracker::getFace ()
{
	return clipFace (_colorImage);
}

// One-time setup of the Kinect Face Tracking pipeline: fill the FT
// camera configs, create the tracker, initialize it, create its result
// object and the frame buffers.  Depth support is optional and enabled
// only when the depth config has a non-zero width.
// @throws int  bare error codes 2..6 identifying the failing step
//              (NOTE(review): throwing raw ints is unusual — callers
//              must catch int; consider a proper exception type).
void QFaceTracker::_init (const CameraConfig &colorConfig,
		const CameraConfig &depthConfig)
{
	HRESULT hr;

	_fTColorConfig.Width = colorConfig.frameSize.width ();
	_fTColorConfig.Height = colorConfig.frameSize.height ();
	_fTColorConfig.FocalLength = colorConfig.focalLength;
	_fTDepthConfig.Width = depthConfig.frameSize.width ();
	_fTDepthConfig.Height = depthConfig.frameSize.height ();
	_fTDepthConfig.FocalLength = depthConfig.focalLength;

	// Try to start the face tracker.
	_pFaceTracker = FTCreateFaceTracker ();
	if (!_pFaceTracker)
		throw 2;

	// Pass the depth config only when a depth camera is configured.
	hr = _pFaceTracker->Initialize (&_fTColorConfig,
			_fTDepthConfig.Width ? &_fTDepthConfig : NULL,
			NULL, NULL);
	if (FAILED (hr))
		throw 3;

	hr = _pFaceTracker->CreateFTResult (&_pFTResult);
	if (FAILED (hr) || !_pFTResult)
		throw 4;

	_pFTColorImage = FTCreateImage ();
	if (!_pFTColorImage)
		throw 5;

	// Depth image buffer is only needed when depth frames will be fed.
	if (_fTDepthConfig.Width) {
		_pFTDepthImage = FTCreateImage ();
		if (!_pFTDepthImage)
			throw 6;
	}
	_isLastTrackSucceeded = false;
}

void QFaceTracker::_convertQImageToIFTImage (const QImage &qimage,
		IFTImage *pIFTImage)
{
	if (qimage.isNull () || !pIFTImage)
		throw E_INVALIDARG;
	FTIMAGEFORMAT f;
	switch (qimage.format ()) {
	case QImage::Format_RGB32:
		f = FTIMAGEFORMAT_UINT8_B8G8R8X8;
		break;
	case QImage::Format_RGB16:
		f = FTIMAGEFORMAT_UINT16_D13P3;
		break;
	default:
		throw E_INVALIDARG;
	}
	if (pIFTImage->GetWidth () != qimage.width () ||
			pIFTImage->GetHeight () != qimage.height () ||
			pIFTImage->GetFormat () != f) {
		pIFTImage->Allocate (qimage.width (), qimage.height (), f);
	}
	memcpy (pIFTImage->GetBuffer (), qimage.bits (),
			glm::min ((int)pIFTImage->GetBufferSize (), qimage.byteCount ()));
}

// Make `pIFTImage` reference `qimage`'s pixel buffer directly (zero
// copy).  The QImage must stay alive for as long as the IFTImage is
// used.
// Supported formats: QImage::Format_RGB32 -> BGRX color buffer,
// QImage::Format_RGB16 -> 13.3 fixed-point depth buffer.
// @throws E_INVALIDARG on null arguments or an unsupported format.
void QFaceTracker::_attachQImageToIFTImage (QImage &qimage, IFTImage *pIFTImage)
{
	if (qimage.isNull () || !pIFTImage)
		throw E_INVALIDARG;

	FTIMAGEFORMAT fmt;
	const QImage::Format qtFmt = qimage.format ();
	if (qtFmt == QImage::Format_RGB32)
		fmt = FTIMAGEFORMAT_UINT8_B8G8R8X8;
	else if (qtFmt == QImage::Format_RGB16)
		fmt = FTIMAGEFORMAT_UINT16_D13P3;
	else
		throw E_INVALIDARG;

	pIFTImage->Attach (qimage.width (), qimage.height (), qimage.bits (),
			fmt, qimage.bytesPerLine ());
}