#include "pch.h"

#include <iostream>
#include <stdio.h>

#include "SensorWrapper.h"
#include "DepthSensor.h"
#include "Tracker.h"
// Holds all state shared by the C-style wrapper API below: the sensor and
// tracker backends, cached display frames, and the latest tracked target.
// Members carry in-class initializers so the context is in a sane state
// even if allocated with `new SensorWrapperContext` (no parentheses),
// which would otherwise leave the bools/ints indeterminate.
struct SensorWrapperContext {
public:
	KinectDepthSensor sensor;        // camera backend (color + depth streams)
	Tracker tracker;                 // visual tracker run on the color stream
	cv::Mat colorBGR;                // last color frame, converted to BGR
	cv::Mat depthBGR;                // last depth frame, converted for display
	bool init_succeeded{false};      // true once sensor init() + start() succeeded
	bool display{false};             // true while the debug windows are shown
	bool valid_target{false};        // true when target_* hold a fresh fix
	int  target_x{0}, target_y{0}, target_z{0};  // target center (px, px, depth value)
};
// Single global context; NULL until sensor_init() succeeds or is attempted.
SensorWrapperContext* ctx = NULL;

// Create the two debug windows ("RGB" and "depth") used while display
// mode is enabled; both are auto-sized to their content.
void init_display() {
	const auto window_flags = cv::WindowFlags::WINDOW_AUTOSIZE;
	cv::namedWindow("depth", window_flags);
	cv::namedWindow("RGB", window_flags);
}

// Tear down every HighGUI window (the "RGB"/"depth" debug views).
// Safe to call even when no window is currently open.
void reset_display() {
	cv::destroyAllWindows();
}

int sensor_get_context(const int variable_id, int length, int* value) {
	switch (variable_id) {
		case SENSOR_VAR_DISPLAY:
			if (ctx->display) *value = 1; else *value = 0;
			return 1;
		case SENSOR_VAR_TARGET:
			value[0] = ctx->target_x;
			value[1] = ctx->target_y;
			value[2] = ctx->target_z;
			return 3;
		default:
			return -1;
	}
	return 0;
}

int sensor_set_context(const int variable_id, int length, int* value) {
	switch (variable_id) {
		case SENSOR_VAR_DISPLAY:
			if (!ctx->display && (*value)) init_display();
				else if (ctx->display && !(*value)) reset_display();
			ctx->display = (*value) != 0;
			return 1;
		default:
			return -1;
	}
	return 0;
}

int sensor_try_access_data(int& x, int& y, int& z, bool force_valid) {
	if (ctx->valid_target || force_valid) {
		x = ctx->target_x;
		y = ctx->target_y;
		z = ctx->target_z;
		return 0;
	}
	return -1;
}

// Allocate the global context and start the sensor.
// @return 0 on success, 1 if the sensor failed to init/start,
//         -1 if already successfully initialized
int sensor_init() {
	if (ctx != NULL && ctx->init_succeeded) return -1;

	// A previous failed attempt may have left a context behind; free it
	// before allocating a new one (this was a memory leak on retry).
	delete ctx;
	ctx = new SensorWrapperContext();
	ctx->init_succeeded = false;   // explicit: stays false on the failure path

	if (WRAPPER_FAILED(ctx->sensor.init()) || WRAPPER_FAILED(ctx->sensor.start())) {
		return 1;
	}
	ctx->display = false; reset_display();
	ctx->valid_target = false;
	ctx->init_succeeded = true;
	return 0;
}

// ...Camera capture and application specific code would go here...
int sensor_update() {
	uint8_t* buffer;
	int width;
	int height;
	ctx->valid_target = false;
	cv::Rect target;
	double score;
	cv::Point target_center(0, 0);
	ctx->sensor.update();

	if (WRAPPER_NOERROR(ctx->sensor.getColorFrame(&buffer, &width, &height))) {
		cv::Mat colorMat(height, width, CV_8UC4, (void*)buffer, cv::Mat::AUTO_STEP);

		// imshow accept BGRA images. maybe this is unnecessary
		cv::cvtColor(colorMat, ctx->colorBGR, cv::COLOR_BGRA2BGR);

		ctx->tracker.tracker_update(ctx->colorBGR);
		if (WRAPPER_NOERROR(ctx->tracker.get_target(target, score))) {
			ctx->valid_target = true;
			ctx->target_x = target.x + target.width / 2;
			ctx->target_y = target.y + target.height / 2;
			if ( ctx->display) {
				target_center = cv::Point(ctx->target_x, ctx->target_y);
				cv::Point top_left(target.x, target.y);
				cv::Point bottom_right(target.x + target.width, target.y + target.height);
				cv::rectangle(ctx->colorBGR, top_left, bottom_right,
					cv::Scalar(255, 0, 0), 4);
			}
		}
		if (ctx->display)
			cv::imshow("RGB", ctx->colorBGR);
	}

	if (WRAPPER_NOERROR(ctx->sensor.getDepthFrame(&buffer, &width, &height))) {
		cv::Mat depthMat(height, width, CV_16UC1, (void*)buffer, cv::Mat::AUTO_STEP);
		cv::cvtColor(depthMat, ctx->depthBGR, cv::COLOR_GRAY2BGR);
		if (ctx->valid_target) {
			ctx->target_z = depthMat.at<uint16_t>(ctx->target_y, ctx->target_x);
		}
		if (ctx->display)
			cv::imshow("depth", ctx->depthBGR);

	}
	if (ctx->display)
		cv::waitKey(30);
	return 0;
}

// Close the debug windows, stop the sensor, and free the context.
// Safe to call before sensor_init() or more than once (idempotent).
int sensor_destroy() {
	if (ctx == NULL) return 0;   // nothing to tear down (guards double-destroy)
	reset_display();
	ctx->sensor.reset();
	delete ctx;
	ctx = NULL;
	return 0;
}


