#include "MBTriangle.h"
#include "MBCameraModel.h"
#include "MBEnums.h"
#include "MBImageManager.h"
//#include "MBOctree.h"
#include "MBTimer.h"
#include "MBVec2.h"
#include "MBVec4.h"
#include "MBWeightMethodManager.h"

#include "MBDirectoryMethods.h"
#include "MBStringMethods.h"

#include <cassert>
#include <cmath>
#include <iostream>
#include <limits>
#include <string>

// GLM
#define GLM_PRECISION_HIGHP_FLOAT
#include <glm/glm.hpp>

using cv::Mat;
using cv::MatConstIterator_;
using cv::Point;
using cv::Scalar;
using cv::Vec3d;

using std::cout;
using std::endl;
using std::make_pair;
using std::numeric_limits;
using std::max;
using std::min;
using std::pair;
using std::set;
using std::string;
using std::vector;

set<MBCameraPosIDWithVertexIndices> MBTriangle::indicesInIntersectingSightTriangles = set<MBCameraPosIDWithVertexIndices>();

//double MBTriangle::WEIGHT_D_INFINITY = 1.0; // because sin(x) <= 1.0 for all x element R
double MBTriangle::WEIGHT_D_INFINITY = 25.0;  // ratio of mm^2/texel^2
double MBTriangle::WEIGHT_V_INFINITY = 2.0; // because sqrt(3) == 1.73 < 2.0

/** Compensates for rounding errors. Is needed to guarantee the metric v_a_a + v_a_b <= v_b_c + v_a_c */
const double ROUNDING_ERROR_COMPENSATION = 0.001;

const MBuint NUM_VERTICES_PER_EDGE = 2u;
const MBuint NUM_VERTICES_PER_TRIANGLE = 3u;

const double COLOR_UCHAR_TO_DOUBLE_FACTOR = 1.0 / 255.0;

const double TEXCOORD_MIN = 0.0;
const double TEXCOORD_MAX = 1.0;

const MBuint X = 0u;
const MBuint Y = 1u;
const MBuint Z = 2u;

const MBuint U = 0u;
const MBuint V = 1u;

const MBuint K1 = 0u;
const MBuint K2 = 1u;
const MBuint P1 = 2u;
const MBuint P2 = 3u;
const MBuint K3 = 4u;
const MBuint K4 = 5u;

const double METERS_TO_MILLIMETERS = 1000.0;

//const MBMatrix4x4 GL_TO_RSP_COORD_SYSTEM = MBMatrix4x4::RotationXYZ( 0.0, 180.0, 0.0 ) * MBMatrix4x4::RotationXYZ( 0.0, 0.0, 180.0 );

const MBMatrix4x4 GL_TO_RSP_COORD_SYSTEM( 1.0,  0.0,  0.0,  0.0,
										  0.0, -1.0,  0.0,  0.0,
										  0.0,  0.0, -1.0,  0.0,
										  0.0,  0.0,  0.0,  1.0 );

double MBTriangle::ms_AccumTimeForOcclusionDetectionSeconds = 0.0;

// OpenCV consts
const MBuint RED = 2u;
const MBuint GREEN = 1u;
const MBuint BLUE = 0u;

////////////////////////////////////////////////////////////////////////////////
// Local Functions BEGIN

/**
  * Samples an image in form of a cv::Mat at the given position and stores the
  * sampled color values into the variables red, green and blue. The image must
  * have the type CV_8UC3, otherwise nothing is sampled and false is returned.
  *
  * @param[in] texture const pointer to the cv::Mat matrix representing the image
  * @param[in] textureCoordinate texture coordinate that specifies the sampling position, elements must be in the range [0 1]
  * @param[out] red pointer to the double variable, that is used to store the sampled red value into it
  * @param[out] green pointer to the double variable, that is used to store the sampled green value into it
  * @param[out] blue pointer to the double variable, that is used to store the sampled blue value into it
  * @param[in] filteringMode filtering mode used to sample the image
  * @param[in] bottemLeftOrigin true if the origin of the texture coordinates is bottom left, false otherwise (top left)
  */
inline bool GetColorValueFromImage( const Mat *texture,
	const MBTextureCoordinate &textureCoordinate,
	double *red, double *green, double *blue,
	const MBFilteringMode &filteringMode = MB_FILTERMODE_NEAREST_NEIGHBOR,
	const bool &bottemLeftOrigin = true )
{
	//if (!texture || texture->type() != CV_64FC3) return false;
	// Only 8-bit 3-channel (BGR) images are supported
	if (!texture || texture->type() != CV_8UC3) return false;

	double texcoord[2];

	int height = texture->rows;
	int width = texture->cols;

	// step is the row stride; expressed in elements of the underlying
	// unsigned char storage (identical to bytes for CV_8UC3)
	//MBuint step = texture->step / sizeof(double);
	MBuint step = texture->step / sizeof(unsigned char);
	int num_channels = texture->channels();

	//double *data = reinterpret_cast<double *>( texture->data );
	unsigned char *data = reinterpret_cast<unsigned char *>( texture->data );

	// extent of one texel in normalized [0 1] texture space
	double texel_width = 1.0 / width;
	double texel_height = 1.0 / height;

	MBuint x = 0U;
	MBuint y = 0U;

	MBuint base_index = 0U;

	bool a = !bottemLeftOrigin; // top-left origin for texture-coords
	bool b = true;				// top-left origin in image -> true for OpenCV

	texcoord[U] = textureCoordinate.mU;
	texcoord[V] = textureCoordinate.mV;

	// if (a xor b) => image origin and texture coordinates origin differ
	if ((a && !b) || (!a && b) ) 
	{
		// flip the vertical axis for the texture coordinate
		texcoord[V] = 1.0 - texcoord[V];
	}

	// coordinates of the desired texel relative to the image
	x = static_cast<MBuint>( texcoord[U] / texel_width );
	y = static_cast<MBuint>( texcoord[V] / texel_height );
// 	x = static_cast<MBuint>( floor( texcoord[U] / texel_width ) );
// 	y = static_cast<MBuint>( floor( texcoord[V] / texel_height ) );

	// coordinates exactly on the far border map into the last texel
	// (keeps x/y inside [0, width-1] x [0, height-1])
	if (texcoord[U] >= 1.0) x--;
	if (texcoord[V] >= 1.0) y--;

	// element offset of the addressed texel: row offset plus column offset
	base_index = y * step + x * num_channels;

	switch (filteringMode)
	{
	case MB_FILTERMODE_NEAREST_NEIGHBOR:
		{
			// single texel lookup, scaled from [0 255] to [0 1]
			*red = data[base_index + RED] * COLOR_UCHAR_TO_DOUBLE_FACTOR;
			*green = data[base_index + GREEN] * COLOR_UCHAR_TO_DOUBLE_FACTOR;
			*blue = data[base_index + BLUE] * COLOR_UCHAR_TO_DOUBLE_FACTOR;

// 			*red = data[base_index + RED];
// 			*green = data[base_index + GREEN];
// 			*blue = data[base_index + BLUE];

			break;
		}

	case MB_FILTERMODE_BILINEAR:
		{
			// NOTE: in the corner/edge special cases below only one or two of
			// the four texel slots are filled; the untouched slots keep their
			// zero coefficients and therefore do not contribute to the result.
			double data_double[3];

			double half_texel_width = texel_width * 0.5;
			double half_texel_height = texel_height * 0.5;

			double lower_left_coef = 0.0;
			double lower_right_coef = 0.0;
			double upper_left_coef = 0.0;
			double upper_right_coef = 0.0;

			double left_residual = 0.0;
			double right_residual = 0.0;
			double upper_residual = 0.0;
			double lower_residual = 0.0;

			MBuint lower_left_neighbor_index = 0U;
			MBuint lower_right_neighbor_index = 0U;
			MBuint upper_left_neighbor_index = 0U;
			MBuint upper_right_neighbor_index = 0U;

			double lower_left_texel[3] = { 0.0, 0.0, 0.0 };
			double lower_right_texel[3] = { 0.0, 0.0, 0.0 };
			double upper_left_texel[3] = { 0.0, 0.0, 0.0 };
			double upper_right_texel[3] = { 0.0, 0.0, 0.0 };

			// v coordinates of the texel centers in the addressed column
			double v_current_texel = 0.0;
			double v_upper_neighbor = 0.0;
			double v_lower_neighbor = 0.0;

			// u coordinates of the texel centers in the addressed row
			double u_current_texel = 0.0;
			double u_right_neighbor = 0.0;
			double u_left_neighbor = 0.0;

			MBuint neighbor_index = 0U;

			int width_times_height = width * height;

			// borders for the cases when not all four pixels are taken into account
			// for bilinear filtering (half a texel away from each image border)
			double x_min = texel_width * 0.5;
			double x_max = 1.0 - x_min;
			double y_min = texel_height * 0.5;
			double y_max = 1.0 - y_min;

			// calculate color values for all four pixels and the
			// corresponding coefficients

			if ((texcoord[U] <= x_min && texcoord[V] <= y_min) ||
				(texcoord[U] >= x_max && texcoord[V] <= y_min) ||
				(texcoord[U] >= x_max && texcoord[V] >= y_max) ||
				(texcoord[U] <= x_min && texcoord[V] >= y_max))
			{
				// one of the corner pixels: no neighbor available in either
				// direction, so the single texel gets full weight
				upper_right_coef = 1.0;

				data_double[RED] = data[base_index + RED];
				data_double[GREEN] = data[base_index + GREEN];
				data_double[BLUE] = data[base_index + BLUE];

				memcpy( upper_right_texel, data_double, 3 * sizeof(double) );
			}
			else if (texcoord[U] <= x_min || texcoord[U] >= x_max)
			{
				// at the left edge or at the right edge:
				// interpolate vertically only, between two texels
				v_upper_neighbor = (y + 1) * texel_height + half_texel_height;
				v_current_texel = v_upper_neighbor - texel_height;
				v_lower_neighbor = v_current_texel - texel_height;

				if (v_upper_neighbor - texcoord[V] < texcoord[V] - v_lower_neighbor)
				{
					// filter between the current texel and the upper neighbor
					upper_right_coef = (texcoord[V] - v_current_texel) * height;
					lower_right_coef = (v_upper_neighbor - texcoord[V]) * height;
					neighbor_index = base_index + step;
				}
				else
				{
					// filter between the current texel and the lower neighbor
					lower_right_coef = (texcoord[V] - v_lower_neighbor) * height;
					upper_right_coef = (v_current_texel - texcoord[V]) * height;
					neighbor_index = base_index - step;
				}

				data_double[RED] = data[neighbor_index + RED];
				data_double[GREEN] = data[neighbor_index + GREEN];
				data_double[BLUE] = data[neighbor_index + BLUE];

				memcpy( upper_right_texel, data_double, 3 * sizeof(double) );

				data_double[RED] = data[base_index + RED];
				data_double[GREEN] = data[base_index + GREEN];
				data_double[BLUE] = data[base_index + BLUE];

				memcpy( lower_right_texel, data_double, 3 * sizeof(double) );
			}
			else if (texcoord[V] <= y_min || texcoord[V] >= y_max)
			{
				// at the lower edge or at the upper edge:
				// interpolate horizontally only, between two texels

				u_right_neighbor = (x + 1) * texel_width + half_texel_width;
				u_current_texel = u_right_neighbor - texel_width;
				u_left_neighbor = u_current_texel - texel_width;

				if (u_right_neighbor - texcoord[U] < texcoord[U] - u_left_neighbor)
				{
					// filter between the current texel and the right neighbor
					upper_right_coef = (texcoord[U] - u_current_texel) * width;
					upper_left_coef = (u_right_neighbor - texcoord[U]) * width;
					neighbor_index = base_index + num_channels;
				}
				else
				{
					// filter between the current texel and the left neighbor
					upper_left_coef = (texcoord[U] - u_left_neighbor) * width;
					upper_right_coef = (u_current_texel - texcoord[U]) * width;
					neighbor_index = base_index - num_channels;
				}

				data_double[RED] = data[neighbor_index + RED];
				data_double[GREEN] = data[neighbor_index + GREEN];
				data_double[BLUE] = data[neighbor_index + BLUE];

				memcpy( upper_right_texel, data_double, 3 * sizeof(double) );

				data_double[RED] = data[base_index + RED];
				data_double[GREEN] = data[base_index + GREEN];
				data_double[BLUE] = data[base_index + BLUE];

				memcpy( upper_left_texel, data_double, 3 * sizeof(double) );
			}
			else
			{
				// somewhere in the middle
				// sample all four neighbors (full bilinear filtering);
				// the 2x2 footprint is chosen towards the nearer neighbors

				u_right_neighbor = (x + 1) * texel_width + half_texel_width;
				u_current_texel = u_right_neighbor - texel_width;
				u_left_neighbor = u_current_texel - texel_width;

				v_upper_neighbor = (y + 1) * texel_height + half_texel_height;
				v_current_texel = v_upper_neighbor - texel_height;
				v_lower_neighbor = v_current_texel - texel_height;

				if (v_upper_neighbor - texcoord[V] < texcoord[V] - v_lower_neighbor)
				{
					// take the upper neighbors
					lower_residual = texcoord[V] - v_current_texel;
					upper_residual = v_upper_neighbor - texcoord[V];

					if (u_right_neighbor - texcoord[U] < texcoord[U] - u_left_neighbor)
					{
						// take the upper right neighbors
						right_residual = u_right_neighbor - texcoord[U];
						left_residual = texcoord[U] - u_current_texel;

						upper_left_neighbor_index = base_index + step;
						upper_right_neighbor_index = base_index + step + num_channels;
						lower_left_neighbor_index = base_index;
						lower_right_neighbor_index = base_index + num_channels;
					}
					else
					{
						// take the upper left neighbors
						right_residual = u_current_texel - texcoord[U];
						left_residual = texcoord[U] - u_left_neighbor;

						upper_left_neighbor_index = base_index + step - num_channels;
						upper_right_neighbor_index = base_index + step;
						lower_left_neighbor_index = base_index - num_channels;
						lower_right_neighbor_index = base_index;
					}
				}
				else
				{
					// take the lower neighbors
					lower_residual = texcoord[V] - v_lower_neighbor;
					upper_residual = v_current_texel - texcoord[V];

					if (u_right_neighbor - texcoord[U] < texcoord[U] - u_left_neighbor)
					{
						// take the lower right neighbors
						right_residual = u_right_neighbor - texcoord[U];
						left_residual = texcoord[U] - u_current_texel;

						upper_left_neighbor_index = base_index;
						upper_right_neighbor_index = base_index + num_channels;
						lower_left_neighbor_index = base_index - step;
						lower_right_neighbor_index = base_index - step + num_channels;
					}
					else
					{
						// take the lower left neighbors
						right_residual = u_current_texel - texcoord[U];
						left_residual = texcoord[U] - u_left_neighbor;

						upper_left_neighbor_index = base_index - num_channels;
						upper_right_neighbor_index = base_index;
						lower_left_neighbor_index = base_index - step - num_channels;
						lower_right_neighbor_index = base_index - step;
					}
				}

				// The coefficients: each texel is weighted by the residual to
				// the diagonally opposite texel center, normalized by
				// width * height (residuals are in texture space)
				upper_right_coef = width_times_height * left_residual * lower_residual;
				upper_left_coef = width_times_height * right_residual * lower_residual;
				lower_left_coef = width_times_height * right_residual * upper_residual;
				lower_right_coef = width_times_height * left_residual * upper_residual;

				// Upper left neighbor
				data_double[RED] = data[upper_left_neighbor_index + RED];
				data_double[GREEN] = data[upper_left_neighbor_index + GREEN];
				data_double[BLUE] = data[upper_left_neighbor_index + BLUE];

				memcpy( upper_left_texel, data_double, 3 * sizeof(double) );

				// Upper right neighbor
				data_double[RED] = data[upper_right_neighbor_index + RED];
				data_double[GREEN] = data[upper_right_neighbor_index + GREEN];
				data_double[BLUE] = data[upper_right_neighbor_index + BLUE];

				memcpy( upper_right_texel, data_double, 3 * sizeof(double) );

				// Lower left neighbor
				data_double[RED] = data[lower_left_neighbor_index + RED];
				data_double[GREEN] = data[lower_left_neighbor_index + GREEN];
				data_double[BLUE] = data[lower_left_neighbor_index + BLUE];

				memcpy( lower_left_texel, data_double, 3 * sizeof(double) );

				// Lower right neighbor
				data_double[RED] = data[lower_right_neighbor_index + RED];
				data_double[GREEN] = data[lower_right_neighbor_index + GREEN];
				data_double[BLUE] = data[lower_right_neighbor_index + BLUE];

				memcpy( lower_right_texel, data_double, 3 * sizeof(double) );
			}

			// Final Red (weighted sum, scaled from [0 255] to [0 1])
			*red = COLOR_UCHAR_TO_DOUBLE_FACTOR *
				(lower_left_coef * lower_left_texel[RED] +
				lower_right_coef * lower_right_texel[RED] +
				upper_left_coef * upper_left_texel[RED] +
				upper_right_coef * upper_right_texel[RED]);

			// Final Green
			*green = COLOR_UCHAR_TO_DOUBLE_FACTOR *
				(lower_left_coef * lower_left_texel[GREEN] +
				lower_right_coef * lower_right_texel[GREEN] +
				upper_left_coef * upper_left_texel[GREEN] +
				upper_right_coef * upper_right_texel[GREEN]);

			// Final Blue
			*blue = COLOR_UCHAR_TO_DOUBLE_FACTOR *
				(lower_left_coef * lower_left_texel[BLUE] +
				lower_right_coef * lower_right_texel[BLUE] +
				upper_left_coef * upper_left_texel[BLUE] +
				upper_right_coef * upper_right_texel[BLUE]);

// 			// Final Red
// 			*red =
// 				lower_left_coef * lower_left_texel[RED] +
// 				lower_right_coef * lower_right_texel[RED] +
// 				upper_left_coef * upper_left_texel[RED] +
// 				upper_right_coef * upper_right_texel[RED];
// 
// 			// Final Green
// 			*green =
// 				lower_left_coef * lower_left_texel[GREEN] +
// 				lower_right_coef * lower_right_texel[GREEN] +
// 				upper_left_coef * upper_left_texel[GREEN] +
// 				upper_right_coef * upper_right_texel[GREEN];
// 
// 			// Final Blue
// 			*blue =
// 				lower_left_coef * lower_left_texel[BLUE] +
// 				lower_right_coef * lower_right_texel[BLUE] +
// 				upper_left_coef * upper_left_texel[BLUE] +
// 				upper_right_coef * upper_right_texel[BLUE];
			break;
		}

	default:
		{
			// unknown filtering mode
			return false;
		}
	}

	return true;
}

// Local Functions END
////////////////////////////////////////////////////////////////////////////////

/**
  * Projects the points in vertices onto the image plane of the camera, that
  * is defined by the given parameters, and stores the image points in pixel
  * coordinates (top left origin) into imagePoints
  *
  * @param[in] vertices vector with all the vertices in 3D space that are projected
  * @param[in] worldToViewTransformation 4x4 matrix describing the transformation from world space to view space of the camera
  * @param[in] cameraMatrix 3x3 camera matrix of the form [fx 0 cx; 0 fy cy; 0 0 1]
  * @param[out] imagePoints projected vertices in pixel coordinates
  * @param[in] cameraModel const pointer to the MBCameraModel object representing the camera model
  */
void ProjectPoints( const vector<MBVec4> &vertices,
					const MBMatrix4x4 &worldToViewTransformation,
					const MBMatrix3x3 &cameraMatrix,
					vector<MBVec2> *imagePoints,
					const MBCameraModel *cameraModel )
{
	MBVec4 point_camera_space;
	MBVec2 point_image_space;

	for (vector<MBVec4>::const_iterator point_it = vertices.begin();
		point_it != vertices.end(); point_it++)
	{
		if (cameraModel->IsImageOriginTopLeft())
		{
			point_camera_space = GL_TO_RSP_COORD_SYSTEM * worldToViewTransformation * (*point_it);
		}
		else
		{
			point_camera_space = worldToViewTransformation * (*point_it);
		}

		point_image_space = MBVec2( point_camera_space[X] / point_camera_space[Z],
									point_camera_space[Y] / point_camera_space[Z] );
		if (cameraModel->IsImageOriginTopLeft())
		{
			point_image_space[X] = cameraMatrix.At( 0, 0 ) * point_image_space[X] + cameraMatrix.At( 0, 2 );
			point_image_space[Y] = cameraMatrix.At( 1, 1 ) * point_image_space[Y] + cameraMatrix.At( 1, 2 );
		}
		else
		{
			point_image_space[X] = -cameraMatrix.At( 0, 0 ) * point_image_space[X] + cameraMatrix.At( 0, 2 );
			point_image_space[Y] = cameraModel->GetIntrinsicParameters().mImageHeightInPixels - (-cameraMatrix.At( 1, 1 ) * point_image_space[Y] + cameraMatrix.At( 1, 2 ));
		}
		
		imagePoints->push_back( point_image_space );
	}
}

/**
  * Constructs a new MBTriangle object with the given parameters
  *
  * @param firstVertexIndex index of the first vertex of the triangle
  * @param secondVertexIndex index of the second vertex of the triangle
  * @param thirdVertexIndex index of the third vertex of the triangle
  * @param vertexCoordinates 3D vertex coordinates of all vertices of the model
  * @param octree Octree, that contains all the triangles of the model
  */
/**
  * Constructs a new MBTriangle object from three vertex indices and the
  * shared vertex coordinate storage of the model. The normal is zeroed out
  * and must be computed later via CalculateNormal().
  *
  * @param firstVertexIndex index of the first vertex of the triangle
  * @param secondVertexIndex index of the second vertex of the triangle
  * @param thirdVertexIndex index of the third vertex of the triangle
  * @param vertexCoordinates 3D vertex coordinates of all vertices of the model
  */
MBTriangle::MBTriangle( const MBuint &firstVertexIndex,
						const MBuint &secondVertexIndex,
						const MBuint &thirdVertexIndex,
						const std::vector<double*> *vertexCoordinates ) :
	mInfinitelyThin( false )
{
	mVertexIndices[0] = firstVertexIndex;
	mVertexIndices[1] = secondVertexIndex;
	mVertexIndices[2] = thirdVertexIndex;

	// Normal is unknown until CalculateNormal() is invoked
	for (MBuint axis = X; axis <= Z; axis++)
	{
		mNormal[axis] = 0.0;
	}

	mVertexCoordinates = vertexCoordinates;
}

/**
  * Adds a neighbor of this triangle. A neighbor is another triangle, that shares
  * exactly one edge with this triangle.
  *
  * @param neighbor neighbor of this triangle
  */
void MBTriangle::AddNeighbor( MBTriangle *neighbor )
{
	// set semantics: inserting the same neighbor twice has no effect
	mNeighbors.insert( neighbor );
}

/**
  * Analyzes the three edges of this triangle in conjunction with the camera
  * position of the given label. Every of these combinations defines an
  * MBTriangleSightTriangle object. For each of these MBTriangleSightTriangle
  * objects that intersects with any other triangle of the model, the three
  * defining parameters (two vertex indices of the edge and the camera position
  * ID) are added to the static indicesInIntersectingSightTriangles set.
  *
  * @param label label under consideration
  * const MBLabelingOptions &options
  */
// void MBTriangle::AddIntersectingSightTriangleIndicesToSet(
// 	const MBCameraView *label,
// 	const MBLabelingOptions &options ) const
// {
// 	// Check, whether the two triangles are neighbors, and which edge they share
// 
// 	vector<MBTextureCoordinate> triangle_texture_coordinates;
// 
// 	// Look, if the edge under consideration is entirely in front of the cameras
// 	const double *current_vertex_pos = 0;
// 	double z_viewspace[3];
// 
// 	set<MBCameraPosIDWithVertexIndices>::iterator it;
// 
// 	MBVec3 cam_pos = label->GetPosition();
// 	MBuint edge_vertex_indices[NUM_VERTICES_PER_EDGE];
// 
// 	current_vertex_pos = GetVertexPosition( 0 );
// 	z_viewspace[0] = (label->GetWorldSpaceToViewSpaceTransformation() * MBVec4( current_vertex_pos[0], current_vertex_pos[1], current_vertex_pos[2], 1.0 ))[Z];
// 
// 	current_vertex_pos = GetVertexPosition( 1 );
// 	z_viewspace[1] = (label->GetWorldSpaceToViewSpaceTransformation() * MBVec4( current_vertex_pos[0], current_vertex_pos[1], current_vertex_pos[2], 1.0 ))[Z];
// 
// 	current_vertex_pos = GetVertexPosition( 2 );
// 	z_viewspace[2] = (label->GetWorldSpaceToViewSpaceTransformation() * MBVec4( current_vertex_pos[0], current_vertex_pos[1], current_vertex_pos[2], 1.0 ))[Z];
// 
// 	MBuint vertex_indices[3] = { 0u, 1u, 2u };
// 	// Calculate the Texture Coordinates
// 	GetTextureCoords( label, &triangle_texture_coordinates );
// 
// 	// Edge 0-1
// 	if (z_viewspace[0] < 0.0 && z_viewspace[1] < 0.0)
// 	{
// 		// Test the texture coordinates of the first triangle
// 		if (!(triangle_texture_coordinates[0].mU < TEXCOORD_MIN || triangle_texture_coordinates[0].mU > TEXCOORD_MAX ||
// 			triangle_texture_coordinates[1].mV < TEXCOORD_MIN || triangle_texture_coordinates[1].mV > TEXCOORD_MAX))
// 		{
// //			if (GetSquaredSineViewNormal( label, options, &vertex_indices[0], &vertex_indices[1] ) != WEIGHT_D_INFINITY)
// 			if (GetSquaredSineViewNormal( label, options, &vertex_indices[0], &vertex_indices[1] ) != MBWeightMethodManager::Get().GetMethodWeightInfinity(MB_SQUARED_SINE_VIEW_NORMAL))
// 			{
// 				it = indicesInIntersectingSightTriangles.find( MBCameraPosIDWithVertexIndices( label->GetCameraPosID(), mVertexIndices[0], mVertexIndices[1] ) );
// 
// 				if (it == indicesInIntersectingSightTriangles.end())
// 				{
// 					edge_vertex_indices[0] = mVertexIndices[0];
// 					edge_vertex_indices[1] = mVertexIndices[1];
// 
// 					bool intersection = mOctree->IsOtherTriangleIntersectingSightTriangle( edge_vertex_indices, cam_pos );
// 
// 					if (intersection)
// 					{
// 						indicesInIntersectingSightTriangles.insert( MBCameraPosIDWithVertexIndices( label->GetCameraPosID(), mVertexIndices[0], mVertexIndices[1] ) );
// 					}
// 				}
// 			}
// 		}
// 	}
// 
// 	// Edge 2-0
// 	if (z_viewspace[2] < 0.0 && z_viewspace[0] < 0.0)
// 	{
// 		// Test the texture coordinates of the first triangle
// 		if (!(triangle_texture_coordinates[0].mU < TEXCOORD_MIN || triangle_texture_coordinates[0].mU > TEXCOORD_MAX ||
// 			triangle_texture_coordinates[2].mV < TEXCOORD_MIN || triangle_texture_coordinates[2].mV > TEXCOORD_MAX))
// 		{
// //			if (GetSquaredSineViewNormal( label, options, &vertex_indices[0], &vertex_indices[2] ) != WEIGHT_D_INFINITY)
// 			if (GetSquaredSineViewNormal( label, options, &vertex_indices[0], &vertex_indices[2] ) != MBWeightMethodManager::Get().GetMethodWeightInfinity(MB_SQUARED_SINE_VIEW_NORMAL))
// 			{
// 				it = indicesInIntersectingSightTriangles.find( MBCameraPosIDWithVertexIndices( label->GetCameraPosID(), mVertexIndices[2], mVertexIndices[0] ) );
// 
// 				if (it == indicesInIntersectingSightTriangles.end())
// 				{
// 					edge_vertex_indices[0] = mVertexIndices[2];
// 					edge_vertex_indices[1] = mVertexIndices[0];
// 
// 					bool intersection = mOctree->IsOtherTriangleIntersectingSightTriangle( edge_vertex_indices, cam_pos );
// 
// 					if (intersection)
// 					{
// 						indicesInIntersectingSightTriangles.insert( MBCameraPosIDWithVertexIndices( label->GetCameraPosID(), mVertexIndices[2], mVertexIndices[0] ) );
// 					}
// 				}
// 			}
// 		}
// 	}
// 
// 	// Edge 1-2
// 	if (z_viewspace[1] < 0.0 && z_viewspace[2] < 0.0)
// 	{
// 		// Test the texture coordinates of the first triangle
// 		if (!(triangle_texture_coordinates[1].mU < TEXCOORD_MIN || triangle_texture_coordinates[1].mU > TEXCOORD_MAX ||
// 			triangle_texture_coordinates[2].mV < TEXCOORD_MIN || triangle_texture_coordinates[2].mV > TEXCOORD_MAX))
// 		{
// //			if (GetSquaredSineViewNormal( label, options, &vertex_indices[1], &vertex_indices[2] ) != WEIGHT_D_INFINITY)
// 			if (GetSquaredSineViewNormal( label, options, &vertex_indices[1], &vertex_indices[2] ) != MBWeightMethodManager::Get().GetMethodWeightInfinity(MB_SQUARED_SINE_VIEW_NORMAL))
// 			{
// 				it = indicesInIntersectingSightTriangles.find( MBCameraPosIDWithVertexIndices( label->GetCameraPosID(), mVertexIndices[1], mVertexIndices[2] ) );
// 
// 				if (it == indicesInIntersectingSightTriangles.end())
// 				{
// 					edge_vertex_indices[0] = mVertexIndices[1];
// 					edge_vertex_indices[1] = mVertexIndices[2];
// 
// 					bool intersection = mOctree->IsOtherTriangleIntersectingSightTriangle( edge_vertex_indices, cam_pos );
// 
// 					if (intersection)
// 					{
// 						indicesInIntersectingSightTriangles.insert( MBCameraPosIDWithVertexIndices( label->GetCameraPosID(), mVertexIndices[1], mVertexIndices[2] ) );
// 					}
// 				}
// 			}
// 		}
// 	}
// }

/**
  * Calculates the normal vector of this triangle and stores it in the
  * mNormal member variable
  *
  * @param vertexCoordinates vertex coordinates of the model
  */
/**
  * Calculates the unit normal vector of this triangle from its three vertex
  * positions and stores it in the mNormal member variable. If the triangle is
  * degenerate (zero area), normalization produces NaN components and the
  * triangle is flagged as infinitely thin.
  *
  * Fix: the original computed the cross product twice and stored one result
  * in an unused local (normal_full); the redundant computation is removed.
  *
  * @param vertexCoordinates vertex coordinates of the model
  */
void MBTriangle::CalculateNormal( vector<double*> &vertexCoordinates )
{
	const double *v0 = vertexCoordinates[mVertexIndices[0]];
	const double *v1 = vertexCoordinates[mVertexIndices[1]];
	const double *v2 = vertexCoordinates[mVertexIndices[2]];

	// Both edge vectors originate at the second vertex
	glm::vec3 v1_to_v0( v0[X] - v1[X], v0[Y] - v1[Y], v0[Z] - v1[Z] );
	glm::vec3 v1_to_v2( v2[X] - v1[X], v2[Y] - v1[Y], v2[Z] - v1[Z] );

	// Cross-product order (edge to v2 x edge to v0) fixes the orientation
	glm::vec3 normal = glm::normalize( glm::cross( v1_to_v2, v1_to_v0 ) );

	mNormal[X] = normal[X];
	mNormal[Y] = normal[Y];
	mNormal[Z] = normal[Z];

	// This comparison is no mistake! It tests for NaN (NaN never compares
	// equal to itself), which is the case when the cross product has zero
	// length, i.e. the triangle is infinitely thin
	if (!(mNormal[X] == mNormal[X]))
	{
		mInfinitelyThin = true;
	}
}

/**
  * Returns the neighbors of this triangle. A neighbor is a triangle that shares
  * exactly one edge with this triangle.
  *
  * @return const reference to the set, that stores pointer to all the neighboring MBTriangle objects
  */
const set<MBTriangle*> &MBTriangle::GetNeighbors( void ) const
{
	// a neighbor shares exactly one edge with this triangle (see AddNeighbor)
	return mNeighbors;
}

/**
  * Returns the normal vector
  *
  * @return const pointer to the normal vector, stored in a double array with three elements
  */
const double *MBTriangle::GetNormal( void ) const
{
	// zero vector until CalculateNormal() has been called
	return mNormal;
}

/**
  * Returns a const pointer to the vertex indices, stored in an array with three elements
  *
  * @return const pointer to the vertex indices, stored in an array with three elements
  */
const MBuint *MBTriangle::GetVertexIndices( void ) const
{
	// three indices into the model-wide vertex coordinate list
	return mVertexIndices;
}

/**
  * Returns the vertex position in 3D space of the vertex, that belongs to the
  * given index (0, 1 or 2). The vertex position is returned as a const pointer
  * to the first element of the array, that consists of three double elements
  * (X, Y, Z).
  *
  * @param index index of the vertex (0, 1 or 2)
  * @return vertex position in 3D space of the vertex, that belongs to the given index
  */
const double *MBTriangle::GetVertexPosition( const MBuint &index ) const
{
	return (*mVertexCoordinates)[mVertexIndices[index]];
}

/**
  * Maps this triangle onto the image, that is defined by the given label, and
  * stores the resulting texture coordinates of the projected points into
  * the textureCoordinate vector
  *
  * @param[in] label label that is used to project the vertices of this triangle onto the corresponding image
  * @param[out] texture coordinates of the projected points of the triangle
  */
void MBTriangle::GetTextureCoords( const MBCameraView *label, vector<MBTextureCoordinate> *textureCoordinate ) const
{
	const double *current_vertex_pos = 0;

	vector<MBVec4> vertices;
	vector<MBVec2> image_points;

	const MBCameraModel *camera_model = label->GetCameraModel();

	vertices.reserve( NUM_VERTICES_PER_TRIANGLE );
	image_points.reserve( NUM_VERTICES_PER_TRIANGLE );

	for (MBuint i = 0; i < NUM_VERTICES_PER_TRIANGLE; i++)
	{
		current_vertex_pos = GetVertexPosition( i );

		vertices.push_back( MBVec4( current_vertex_pos[0], current_vertex_pos[1], current_vertex_pos[2], 1.0 ) );
	}

	// Project the points onto the image Plane
	ProjectPoints(  vertices,
		label->GetWorldSpaceToViewSpaceTransformation(),
		camera_model->GetCameraMatrix(),
		&image_points,
		camera_model );

	const double X_MAX = static_cast<double>( camera_model->GetIntrinsicParameters().mImageWidthInPixels );
	const double Y_MAX = static_cast<double>( camera_model->GetIntrinsicParameters().mImageHeightInPixels );

	const MBVec2 &label_shift = label->GetTexcoordShiftVector();

	if (label_shift[U] != 0.0 || label_shift[V] != 0.0)
	{
		for (MBuint i = 0; i < NUM_VERTICES_PER_TRIANGLE; i++)
		{
			textureCoordinate->push_back(
				MBTextureCoordinate( (image_points[i][U] / X_MAX) + label_shift[U],
									 1.0 - ((image_points[i][V] / Y_MAX) + label_shift[V]) ) );
		}
	}
	else
	{
		for (MBuint i = 0; i < NUM_VERTICES_PER_TRIANGLE; i++)
		{
			textureCoordinate->push_back( MBTextureCoordinate( image_points[i][U] / X_MAX, 1.0 - (image_points[i][V] / Y_MAX) ) );
		}
	}
}

/**
  * Returns the squared sine of the angle between the view vector (camPos -
  * triangleCenterOfMass) and the normal vector of the triangle when the dot
  * product of these vectors is nonnegative, WEIGHT_D_INFINITY otherwise. It is
  * possible to pass pointers to two vertex indices. Then not the
  * triangleCenterOfMass is calculated, but
  * (firstVertexIndex + secondVertexIndex) * 0.5 instead.
  *
  * @param label label under consideration
  * @param firstVertexIndex optional pointer to the first vertex index
  * @param secondVertexIndex optional pointer to the second vertex index
  * @return squared sine of the angle between the view vector and the normal vector (or WEIGHT_D_INFINITY)
  */
double MBTriangle::GetSquaredSineViewNormal(
	const MBCameraView *label,
	const MBLabelingOptions &labelingOptions,
	const MBuint *firstVertexIndex,
	const MBuint *secondVertexIndex ) const
{
	// Determine the point the view vector is aimed at: the center of mass of
	// the whole triangle, or - if both optional vertex indices are given -
	// the midpoint of the edge spanned by those two vertices.
	const double *current_vertex_pos = 0;
	const MBVec3 &camera_pos = label->GetPosition();

	// BUG FIX: this array stores a single X/Y/Z position, so its size is the
	// number of spatial dimensions (3), not NUM_VERTICES_PER_TRIANGLE, which
	// is only coincidentally 3 as well.
	double center_of_mass[3] = { 0.0, 0.0, 0.0 };

	if (firstVertexIndex == 0 || secondVertexIndex == 0)
	{
		// Average of all three vertex positions
		for (MBuint i = 0; i < NUM_VERTICES_PER_TRIANGLE; i++)
		{
			current_vertex_pos = GetVertexPosition( i );

			center_of_mass[X] += current_vertex_pos[X];
			center_of_mass[Y] += current_vertex_pos[Y];
			center_of_mass[Z] += current_vertex_pos[Z];
		}

		center_of_mass[X] /= 3.0;
		center_of_mass[Y] /= 3.0;
		center_of_mass[Z] /= 3.0;
	}
	else
	{
		// Midpoint of the edge given by the two vertex indices
		current_vertex_pos = GetVertexPosition( *firstVertexIndex );
		center_of_mass[X] += current_vertex_pos[X];
		center_of_mass[Y] += current_vertex_pos[Y];
		center_of_mass[Z] += current_vertex_pos[Z];

		current_vertex_pos = GetVertexPosition( *secondVertexIndex );
		center_of_mass[X] += current_vertex_pos[X];
		center_of_mass[Y] += current_vertex_pos[Y];
		center_of_mass[Z] += current_vertex_pos[Z];

		center_of_mass[X] *= 0.5;
		center_of_mass[Y] *= 0.5;
		center_of_mass[Z] *= 0.5;
	}

	const double *p_normal = GetNormal();

	cv::Vec3d normal( p_normal[X], p_normal[Y], p_normal[Z] );
	cv::Vec3d view_vector(
		camera_pos[X] - center_of_mass[X],
		camera_pos[Y] - center_of_mass[Y],
		camera_pos[Z] - center_of_mass[Z] );

	double distance = cv::norm( view_vector );

	// Normalize the view vector.
	// NOTE(review): if the camera position coincides with the target point,
	// distance is 0.0 and this divides by zero - presumably valid camera
	// poses are never located on the mesh surface; verify.
	view_vector[X] /= distance;
	view_vector[Y] /= distance;
	view_vector[Z] /= distance;

	double dot_product = normal.dot( view_vector );

	if (dot_product <= 0.0)
	{
		// The Triangle is looked at from behind
//		return WEIGHT_D_INFINITY;
		return MBWeightMethodManager::Get().GetMethodWeightInfinity(MB_SQUARED_SINE_VIEW_NORMAL);
	}

	// Angle between (unit) view vector and (unit) normal
	double theta = acos( dot_product );

	double sin_theta = sin( theta );

	double angle_weight = labelingOptions.GetAngleWeight();

	// Return linear combination between angle cost and distance cost; the
	// distance term is clamped to 1.0 at GetMaximumDistance().
	return angle_weight * sin_theta * sin_theta + (1.0 - angle_weight) * std::min( 1.0, distance / labelingOptions.GetMaximumDistance() );
}

/**
  * Returns the data-term weight for texturing this triangle with the given
  * label.
  *
  * Returns the current weight method's "infinity" value if the triangle is
  * not entirely inside the image after projection, if it is infinitely thin,
  * or if an image mask is set and the projected triangle is not completely
  * inside that mask. Otherwise the weight of the currently selected method
  * is returned.
  *
  * @param label label under consideration
  * @param distanceMap distance map used for the occlusion test
  * @param labelingOptions options forwarded to the weight calculation
  * @return weight of the current method, or its infinity value
  */
double MBTriangle::GetWeightD(
	const MBCameraView *label,
	MBDistanceMap *distanceMap,
	const MBLabelingOptions &labelingOptions ) const
{
	if (!IsTriangleCompletelyInside( *label, distanceMap ) || IsInfinitelyThin())
	{
//		return WEIGHT_D_INFINITY;
		return MBWeightMethodManager::Get().GetCurrentMethodWeightInfinity();
	}

	// If a mask is present, the projected triangle must lie entirely inside it
	if (!label->GetMaskPath().empty() && !IsTriangleInsideMask( *label ))
	{
//		return WEIGHT_D_INFINITY;
		return MBWeightMethodManager::Get().GetCurrentMethodWeightInfinity();
	}

	switch( MBWeightMethodManager::Get().GetCurrentMethod() )
	{
		case MB_REAL_TO_PROJECTED_AREA_RATIO:
			return GetRealAreaPerProjectedArea(*label);

		case MB_SQUARED_SINE_VIEW_NORMAL:
			return GetSquaredSineViewNormal( label, labelingOptions );

		default:
			// BUG FIX: control previously fell off the end of this non-void
			// function here - undefined behavior in release builds where
			// assert() compiles away. Return the infinity weight instead.
			assert(false && "Non-existent MBWeightMethod");
			return MBWeightMethodManager::Get().GetCurrentMethodWeightInfinity();
	}
}

/**
  * If this triangle is not entirely inside the image of the label after
  * projection WEIGHT_D_INFINITY is returned. If it is, the discrete integral
  * of the gradient of the projected area is calculated, scaled by
  * (perimeter / area) of this area, and then it is returned (negated, so
  * that views with more image detail get a smaller, i.e. better, weight).
  *
  * @param label label under consideration
  * @param distanceMap distance map used for the occlusion test
  * @param imagePyramidLevel image pyramid level that is used for the gradient calculation
  * @return WEIGHT_D_INFINITY or scaled discrete integral of the gradient of the projected area
  */
double MBTriangle::GetWeightDIntegral( const MBCameraView *label, MBDistanceMap *distanceMap, const MBuint &imagePyramidLevel ) const
{
	// Visibility / occlusion test first
	if (!IsTriangleCompletelyInside( *label, distanceMap ))
	{
//		return WEIGHT_D_INFINITY;
		return MBWeightMethodManager::Get().GetCurrentMethodWeightInfinity();
	}

	const Mat *image = MBImageManager::Get().GetImage( label->GetUndistortedImagePath(), imagePyramidLevel );

	assert( image && image->data );

	vector<MBTextureCoordinate> triangle_texture_coordinates;
	GetTextureCoords( label, &triangle_texture_coordinates );

	// Test the texture coordinates
	// NOTE: these locals shadow the identical file-scope constants
	const double TEXCOORD_MIN = 0.0;
	const double TEXCOORD_MAX = 1.0;

	for (vector<MBTextureCoordinate>::const_iterator texcoord_it = triangle_texture_coordinates.begin();
		texcoord_it != triangle_texture_coordinates.end(); texcoord_it++)
	{
		if ((*texcoord_it).mU < TEXCOORD_MIN || (*texcoord_it).mU > TEXCOORD_MAX ||
			(*texcoord_it).mV < TEXCOORD_MIN || (*texcoord_it).mV > TEXCOORD_MAX)
//			return WEIGHT_D_INFINITY;
			return MBWeightMethodManager::Get().GetCurrentMethodWeightInfinity();
	}

// 	cv::namedWindow( "image" );
// 	cv::imshow( "image", *image );
// 	cv::waitKey( 0 );

	// Short alias for the texture coordinates
	vector<MBTextureCoordinate> &t = triangle_texture_coordinates;

	// Edge lengths of the projected triangle in texture space
	double a = sqrt( pow( t[1].mU - t[0].mU, 2 ) + pow( t[1].mV - t[0].mV, 2 ) );
	double b = sqrt( pow( t[2].mU - t[1].mU, 2 ) + pow( t[2].mV - t[1].mV, 2 ) );
	double c = sqrt( pow( t[0].mU - t[2].mU, 2 ) + pow( t[0].mV - t[2].mV, 2 ) );
	double perimeter = a + b + c;
	double s = perimeter * 0.5;

	// Heron's area formula for triangles
	double area = sqrt( s * (s - a) * (s - b) * (s - c) );

	// Scaling factor that makes the integral comparable between triangles of
	// different size/shape. NOTE(review): a degenerate projection has
	// area == 0 and yields an infinite factor - presumably excluded by the
	// checks above; verify.
	double normalization_factor = perimeter / area;

	// Integer pixel coordinates of the three projected vertices
	int pixel_coords[3][2];

	int image_width = image->cols;
	int image_height = image->rows;

	double texel_width = 1.0 / image_width;
	double texel_height = 1.0 / image_height;

	// Bounding box of the projected triangle, in pixels
	int min_point[2] = { image_width - 1, image_height - 1 };
	int max_point[2] = { 0, 0 };

	for (int i = 0; i < NUM_VERTICES_PER_TRIANGLE; i++)
	{
		// coordinates of the desired texel relative to the image
		pixel_coords[i][X] = static_cast<int>( floor( triangle_texture_coordinates[i].mU / texel_width ) );

		// Flip the y-coordinate, because the origin of the image in OpenCV is top left
		triangle_texture_coordinates[i].mV = 1.0 - triangle_texture_coordinates[i].mV;

		pixel_coords[i][Y] = static_cast<int>( floor( triangle_texture_coordinates[i].mV / texel_height ) );

		if (pixel_coords[i][X] < min_point[X]) min_point[X] = pixel_coords[i][X];
		if (pixel_coords[i][Y] < min_point[Y]) min_point[Y] = pixel_coords[i][Y];
		if (pixel_coords[i][X] > max_point[X]) max_point[X] = pixel_coords[i][X];
		if (pixel_coords[i][Y] > max_point[Y]) max_point[Y] = pixel_coords[i][Y];
	}

	// ROI = bounding box enlarged by a 1-pixel border (for the 3-tap kernels)
	int roi_width = max_point[X] - min_point[X] + 3;
	int roi_height = max_point[Y] - min_point[Y] + 3;

	cv::Rect roi_rect( min_point[X] - 1, min_point[Y] - 1, roi_width, roi_height );

	// Clip the ROI against the image borders
	int clipped_x = min( max( roi_rect.x, 0 ), image_width - 1);
	int clipped_y = min( max( roi_rect.y, 0 ), image_height - 1);
	int clipped_width = min( max( roi_rect.x + roi_width - clipped_x, 1), image_width - clipped_x );
	int clipped_height = min( max( roi_rect.y + roi_height - clipped_y, 1), image_height - clipped_y );

	cv::Rect clipped_rect( clipped_x, clipped_y, clipped_width, clipped_height );

	Mat roi_mat( *image, clipped_rect );

	// NOTE(review): the mask and the gradient images below use clipped_rect,
	// while the triangle points are offset by roi_rect - if the ROI was
	// actually clipped at an image border the two origins differ by the
	// clipped amount. Presumably the earlier inside-image tests make clipping
	// a non-issue; verify.
	Mat mask = Mat::zeros( clipped_rect.size(), CV_8U );

	// Central-difference gradient kernels
	double g_k_h[1][3] = { -0.5, 0.0, 0.5 };
	double g_k_v[3][1] = { -0.5, 0.0, 0.5 };

	Mat gradient_kernel_horizontal( 1, 3, CV_64FC1, g_k_h );
	Mat gradient_kernel_vertical( 3, 1, CV_64FC1, g_k_v );
	
	Mat gradient_image_horizontal;
	Mat gradient_image_vertical;

	cv::filter2D( roi_mat, gradient_image_horizontal, /*roi_mat.depth()*/CV_64FC3, gradient_kernel_horizontal, Point( 1, 0 ), 0, cv::BORDER_REPLICATE );
	cv::filter2D( roi_mat, gradient_image_vertical, /*roi_mat.depth()*/CV_64FC3, gradient_kernel_vertical, Point( 0, 1 ), 0, cv::BORDER_REPLICATE );

	// Rasterize the projected triangle into the mask (ROI-relative coords)
	cv::Point triangle_points[3];

	for (MBuint index = 0; index < NUM_VERTICES_PER_TRIANGLE; index++)
	{
		triangle_points[index].x = static_cast<int>( floor( triangle_texture_coordinates[index].mU * image_width ) ) - roi_rect.x;
		triangle_points[index].y = static_cast<int>( floor( triangle_texture_coordinates[index].mV * image_height ) ) - roi_rect.y;
	}
	
	cv::fillConvexPoly( mask, &triangle_points[0], NUM_VERTICES_PER_TRIANGLE, cv::Scalar( 255.0, 255.0, 255.0 ) );

// 	Mat resized;
// 	cv::resize( mask, resized, cv::Size(), 2.0, 2.0, cv::INTER_NEAREST );
// 	cv::namedWindow( "image" );
// 	cv::imshow( "image", resized );
// 	cv::waitKey( 0 );

	// compute sum of positive matrix elements, iterator-based variant
	double sum = 0.0;
	MatConstIterator_<unsigned char> it = mask.begin<unsigned char>(), it_end = mask.end<unsigned char>();
	
	Point current_pos;

	Vec3d gradient_x;
	Vec3d gradient_y;

	// Accumulate the squared gradient magnitude over all masked pixels
	for(; it != it_end; ++it)
	{
		if (*it == 255u)
		{
			current_pos = it.pos();
			gradient_x = gradient_image_horizontal.at<Vec3d>(current_pos);
			gradient_y = gradient_image_vertical.at<Vec3d>(current_pos);

			// Calculate the gradient using the Frobenius Norm of the Jacobi Matrix
			// See http://mathworld.wolfram.com/FrobeniusNorm.html for details

			sum += gradient_x[RED] * gradient_x[RED] + gradient_x[GREEN] * gradient_x[GREEN] + gradient_x[BLUE] * gradient_x[BLUE] + 
				gradient_y[RED] * gradient_y[RED] + gradient_y[GREEN] * gradient_y[GREEN] + gradient_y[BLUE] * gradient_y[BLUE];
		}
	}

	// Negated: more image detail inside the projection -> smaller weight
	return -normalization_factor * sum;
}

/**
  * Returns the discrete integral of color differences along the sharing edge 
  * of the labels. If the labels are no neighbors, or the edge is partly outside
  * of one of the labels, WEIGHT_V_INFINITY is returned.
  *
  * @param label first label under consideration
  * @param neighborNode MBTriangle, that should be a neighbor of this MBTriangle
  * @param neighborLabel label, that is used to texture the neighboring MBTriangle
  * @param distanceMap distance map used for the occlusion tests of the shared edge
  * @param labelingOptions options forwarded to GetSquaredSineViewNormal()
  * @param filteringMode filtering mode used when sampling the images
  * @param imagePyramidLevel image pyramid level used
  * @param numSamples number of samples that are taken when calculating the discrete integral (-1 derives the count from the edge length in pixels)
  * @return discrete integral of color differences along the sharing edge of the labels (or WEIGHT_V_INFINITY)
  */
double MBTriangle::GetWeightV(
	const MBCameraView *label,
	const MBTriangle *neighborNode,
	const MBCameraView *neighborLabel,
	MBDistanceMap *distanceMap,
	const MBLabelingOptions &labelingOptions,
	const MBFilteringMode &filteringMode,
	const MBuint &imagePyramidLevel,
	const MBint &numSamples ) const
{
	// Check, whether one of the triangle is infinitely thin
	if (IsInfinitelyThin() || neighborNode->IsInfinitelyThin())
		return WEIGHT_V_INFINITY;

	// Check, whether the two triangles are neighbors, and which edge they share

	vector<MBTextureCoordinate> first_triangle_texture_coordinates;
	vector<MBTextureCoordinate> second_triangle_texture_coordinates;

	// Endpoints of the shared edge, in each triangle's texture space
	MBTextureCoordinate first_triangle_integral_coords_from_to[NUM_VERTICES_PER_EDGE];
	MBTextureCoordinate second_triangle_integral_coords_from_to[NUM_VERTICES_PER_EDGE];

	// Pairs of (index in this triangle, index in the neighbor triangle) of
	// the vertices both triangles have in common
	vector<pair<MBuint, MBuint>> adjacent_vertices_array_indices;
	adjacent_vertices_array_indices.reserve( NUM_VERTICES_PER_EDGE );
	
	// Are they neighbors?
	// Look for the indices in the other triangle
	for (MBuint first_tr_i = 0u; first_tr_i < NUM_VERTICES_PER_TRIANGLE; first_tr_i++)
	{
		for (MBuint second_tr_i = 0u; second_tr_i < NUM_VERTICES_PER_TRIANGLE; second_tr_i++)
		{
			if (mVertexIndices[first_tr_i] == neighborNode->mVertexIndices[second_tr_i])
			{
				adjacent_vertices_array_indices.push_back( make_pair( first_tr_i, second_tr_i ) );
			}
		}
	}

	// Fewer than two shared vertices -> not edge-adjacent
	if (adjacent_vertices_array_indices.size() < NUM_VERTICES_PER_EDGE)
		return WEIGHT_V_INFINITY;

	// Look, if the edge under consideration is entirely in front of the cameras
	const double *current_vertex_pos = 0;
	double z_viewspace;

	for (MBuint i = 0; i < NUM_VERTICES_PER_EDGE; i++)
	{
		current_vertex_pos = GetVertexPosition( adjacent_vertices_array_indices[i].first );
		z_viewspace = (label->GetWorldSpaceToViewSpaceTransformation() * MBVec4( current_vertex_pos[0], current_vertex_pos[1], current_vertex_pos[2], 1.0 ))[Z];

		if (z_viewspace >= 0.0)
		{
			// The point is behind the camera, and can therefore not be seen
			return WEIGHT_V_INFINITY;
		}

		// Not necessary, vertex positions are identical
		//current_vertex_pos = neighborNode->GetVertexPosition( adjacent_vertices_array_indices[i].second );
		z_viewspace = (neighborLabel->GetWorldSpaceToViewSpaceTransformation() * MBVec4( current_vertex_pos[0], current_vertex_pos[1], current_vertex_pos[2], 1.0 ))[Z];

		if (z_viewspace >= 0.0)
		{
			// The point is behind the camera, and can therefore not be seen
			return WEIGHT_V_INFINITY;
		}
	}

	// Calculate the Texture Coordinates
	GetTextureCoords( label, &first_triangle_texture_coordinates );
	first_triangle_integral_coords_from_to[0] = first_triangle_texture_coordinates[adjacent_vertices_array_indices[0].first];
	first_triangle_integral_coords_from_to[1] = first_triangle_texture_coordinates[adjacent_vertices_array_indices[1].first];

	// Test the texture coordinates of the first triangle
	if (first_triangle_integral_coords_from_to[0].mU < TEXCOORD_MIN || first_triangle_integral_coords_from_to[0].mU > TEXCOORD_MAX ||
		first_triangle_integral_coords_from_to[0].mV < TEXCOORD_MIN || first_triangle_integral_coords_from_to[0].mV > TEXCOORD_MAX ||
		first_triangle_integral_coords_from_to[1].mU < TEXCOORD_MIN || first_triangle_integral_coords_from_to[1].mU > TEXCOORD_MAX ||
		first_triangle_integral_coords_from_to[1].mV < TEXCOORD_MIN || first_triangle_integral_coords_from_to[1].mV > TEXCOORD_MAX)
		return WEIGHT_V_INFINITY;

	// test if edge of triangle 1 is inside the image mask
	if( !label->GetMaskPath().empty() )
	{
		if( !IsEdgeInsideMask(adjacent_vertices_array_indices[0].first,adjacent_vertices_array_indices[1].first, *label) )
			return WEIGHT_V_INFINITY;
	}

	// Optional circular "used image area" restriction for the first label
	if (label->GetUsedImageAreaRadius() > 0.0)
	{
		const double MIDPOINT_X = 0.5;
		const double MIDPOINT_Y = 0.5;
		
		const double MAX_DISTANCE =
			(label->GetUsedImageAreaRadius() * MIDPOINT_X);

		const double MAX_DISTANCE_SQUARED = MAX_DISTANCE * MAX_DISTANCE;

		double delta_x;
		double delta_y;
		double distance_squared;
		// Aspect ratio compensation so the radius test is circular in pixels
		double ratio =
			static_cast<double>( label->GetCameraModel()->GetIntrinsicParameters().mImageWidthInPixels ) /
			static_cast<double>( label->GetCameraModel()->GetIntrinsicParameters().mImageHeightInPixels );

		double scale_x;
		double scale_y;

		if (ratio > 1.0)
		{
			scale_x = ratio;
			scale_y = 1.0;
		}
		else
		{
			scale_x = 1.0;
			scale_y = 1.0 / ratio;
		}

		for (MBuint i = 0; i < 2; i++)
		{
			delta_x = scale_x * (first_triangle_integral_coords_from_to[i].mU - MIDPOINT_X);
			delta_y = scale_y * (first_triangle_integral_coords_from_to[i].mV - MIDPOINT_Y);
			distance_squared = delta_x * delta_x + delta_y * delta_y;

			if (distance_squared > MAX_DISTANCE_SQUARED)
				return WEIGHT_V_INFINITY;
		}
	}

	neighborNode->GetTextureCoords( neighborLabel, &second_triangle_texture_coordinates );
	second_triangle_integral_coords_from_to[0] = second_triangle_texture_coordinates[adjacent_vertices_array_indices[0].second];
	second_triangle_integral_coords_from_to[1] = second_triangle_texture_coordinates[adjacent_vertices_array_indices[1].second];

	// Test the texture coordinates of the second triangle
	if (second_triangle_integral_coords_from_to[0].mU < TEXCOORD_MIN || second_triangle_integral_coords_from_to[0].mU > TEXCOORD_MAX ||
		second_triangle_integral_coords_from_to[0].mV < TEXCOORD_MIN || second_triangle_integral_coords_from_to[0].mV > TEXCOORD_MAX ||
		second_triangle_integral_coords_from_to[1].mU < TEXCOORD_MIN || second_triangle_integral_coords_from_to[1].mU > TEXCOORD_MAX ||
		second_triangle_integral_coords_from_to[1].mV < TEXCOORD_MIN || second_triangle_integral_coords_from_to[1].mV > TEXCOORD_MAX)
		return WEIGHT_V_INFINITY;

	// test if edge of triangle 2 is inside the image mask
	if( !neighborLabel->GetMaskPath().empty() )
	{
		if( !neighborNode->IsEdgeInsideMask(adjacent_vertices_array_indices[0].second,adjacent_vertices_array_indices[1].second, *neighborLabel) )
			return WEIGHT_V_INFINITY;
	}

	// Optional circular "used image area" restriction for the neighbor label
	if (neighborLabel->GetUsedImageAreaRadius() > 0.0)
	{
		const double MIDPOINT_X = 0.5;
		const double MIDPOINT_Y = 0.5;

		const double MAX_DISTANCE =
			(neighborLabel->GetUsedImageAreaRadius() * MIDPOINT_X);

		const double MAX_DISTANCE_SQUARED = MAX_DISTANCE * MAX_DISTANCE;

		double delta_x;
		double delta_y;
		double distance_squared;

		double ratio =
			static_cast<double>( neighborLabel->GetCameraModel()->GetIntrinsicParameters().mImageWidthInPixels ) /
			static_cast<double>( neighborLabel->GetCameraModel()->GetIntrinsicParameters().mImageHeightInPixels );

		double scale_x;
		double scale_y;

		if (ratio > 1.0)
		{
			scale_x = ratio;
			scale_y = 1.0;
		}
		else
		{
			scale_x = 1.0;
			scale_y = 1.0 / ratio;
		}

		for (MBuint i = 0; i < 2; i++)
		{
			delta_x = scale_x * (second_triangle_integral_coords_from_to[i].mU - MIDPOINT_X);
			delta_y = scale_y * (second_triangle_integral_coords_from_to[i].mV - MIDPOINT_Y);
			distance_squared = delta_x * delta_x + delta_y * delta_y;

			if (distance_squared > MAX_DISTANCE_SQUARED)
				return WEIGHT_V_INFINITY;
		}
	}

	// NOTE(review): the tests below detect "seen from behind" by comparing
	// doubles with == against the infinity sentinel returned by
	// GetSquaredSineViewNormal(); this relies on the sentinel being returned
	// unmodified (it is - the sentinel path returns it directly).

//	bool label_sees_node_edge_from_behind = (GetSquaredSineViewNormal( label, labelingOptions, &adjacent_vertices_array_indices[0].first, &adjacent_vertices_array_indices[1].first ) == WEIGHT_D_INFINITY);
	bool label_sees_node_edge_from_behind = (GetSquaredSineViewNormal( label, labelingOptions, &adjacent_vertices_array_indices[0].first, &adjacent_vertices_array_indices[1].first ) == MBWeightMethodManager::Get().GetMethodWeightInfinity(MB_SQUARED_SINE_VIEW_NORMAL));
//	bool label_sees_neighbor_node_edge_from_behind = (neighborNode->GetSquaredSineViewNormal( label, labelingOptions, &adjacent_vertices_array_indices[0].second, &adjacent_vertices_array_indices[1].second ) == WEIGHT_D_INFINITY);
	bool label_sees_neighbor_node_edge_from_behind = (neighborNode->GetSquaredSineViewNormal( label, labelingOptions, &adjacent_vertices_array_indices[0].second, &adjacent_vertices_array_indices[1].second ) == MBWeightMethodManager::Get().GetMethodWeightInfinity(MB_SQUARED_SINE_VIEW_NORMAL));
	bool first_node_edge_from_behind = label_sees_node_edge_from_behind && label_sees_neighbor_node_edge_from_behind;

	// Look from which side we are looking at the edge
	if (first_node_edge_from_behind)
		return WEIGHT_V_INFINITY;

//	bool neighbor_label_sees_node_edge_from_behind = (GetSquaredSineViewNormal( neighborLabel, labelingOptions, &adjacent_vertices_array_indices[0].first, &adjacent_vertices_array_indices[1].first ) == WEIGHT_D_INFINITY);
	bool neighbor_label_sees_node_edge_from_behind = (GetSquaredSineViewNormal( neighborLabel, labelingOptions, &adjacent_vertices_array_indices[0].first, &adjacent_vertices_array_indices[1].first ) == MBWeightMethodManager::Get().GetMethodWeightInfinity(MB_SQUARED_SINE_VIEW_NORMAL));
//	bool neighbor_label_sees_neighbor_node_edge_from_behind = (neighborNode->GetSquaredSineViewNormal( neighborLabel, labelingOptions, &adjacent_vertices_array_indices[0].second, &adjacent_vertices_array_indices[1].second ) == WEIGHT_D_INFINITY);
	bool neighbor_label_sees_neighbor_node_edge_from_behind = (neighborNode->GetSquaredSineViewNormal( neighborLabel, labelingOptions, &adjacent_vertices_array_indices[0].second, &adjacent_vertices_array_indices[1].second ) == MBWeightMethodManager::Get().GetMethodWeightInfinity(MB_SQUARED_SINE_VIEW_NORMAL));
	bool second_node_edge_from_behind = neighbor_label_sees_node_edge_from_behind && neighbor_label_sees_neighbor_node_edge_from_behind;

	// Look from which side we are looking at the edge
	if (second_node_edge_from_behind)
		return WEIGHT_V_INFINITY;

	///////////////////////////////////////////////////////////////////////////
	// Look into Distance Map if the edge is occluded in either of the images

	// For the Label itself
	distanceMap->Update( label );

	for (MBuint i = 0; i < NUM_VERTICES_PER_EDGE; i++)
	{
		current_vertex_pos = GetVertexPosition( adjacent_vertices_array_indices[i].first );
		z_viewspace = (label->GetWorldSpaceToViewSpaceTransformation() * MBVec4( current_vertex_pos[0], current_vertex_pos[1], current_vertex_pos[2], 1.0 ))[Z];

		if (z_viewspace < distanceMap->GetDepthAtCoord( first_triangle_integral_coords_from_to[i].mU, first_triangle_integral_coords_from_to[i].mV ))
		{
			// Point is occluded
			return WEIGHT_V_INFINITY;
		}
	}

	// For the Neighbor Label
	distanceMap->Update( neighborLabel );

	for (MBuint i = 0; i < NUM_VERTICES_PER_EDGE; i++)
	{
		current_vertex_pos = neighborNode->GetVertexPosition( adjacent_vertices_array_indices[i].second );
		z_viewspace = (neighborLabel->GetWorldSpaceToViewSpaceTransformation() * MBVec4( current_vertex_pos[0], current_vertex_pos[1], current_vertex_pos[2], 1.0 ))[Z];

		if (z_viewspace < distanceMap->GetDepthAtCoord( second_triangle_integral_coords_from_to[i].mU, second_triangle_integral_coords_from_to[i].mV ))
		{
			// The point is behind the camera, and can therefore not be seen
			return WEIGHT_V_INFINITY;
		}
	}

// 	if (useOctree)
// 	{
// 		MBTimer octree_timer;
// 		MBuint edge_vertex_indices[NUM_VERTICES_PER_EDGE] = {
// 			mVertexIndices[adjacent_vertices_array_indices[0].first],
// 			mVertexIndices[adjacent_vertices_array_indices[1].first] };
// 
// 		MBuint neighbor_edge_vertex_indices[NUM_VERTICES_PER_EDGE] = {
// 			neighborNode->mVertexIndices[adjacent_vertices_array_indices[1].second],
// 			neighborNode->mVertexIndices[adjacent_vertices_array_indices[0].second] };
// 
// 		if (indicesInIntersectingSightTriangles.find( MBCameraPosIDWithVertexIndices( label->GetCameraPosID(), edge_vertex_indices[0], edge_vertex_indices[1] ) ) != indicesInIntersectingSightTriangles.end() ||
// 			indicesInIntersectingSightTriangles.find( MBCameraPosIDWithVertexIndices( neighborLabel->GetCameraPosID(), neighbor_edge_vertex_indices[0], neighbor_edge_vertex_indices[1] ) ) != indicesInIntersectingSightTriangles.end() )
// 		{
// 			ms_AccumTimeForOcclusionDetectionSeconds += octree_timer.Toc();
// 			return WEIGHT_V_INFINITY;
// 		}
// 
// 		ms_AccumTimeForOcclusionDetectionSeconds += octree_timer.Toc();
// 	}
	///////////////////////////////////////////////////////////////////////////

	// Same labels -> Weight == 0.0 ( This must be after the if-statements above!! )
	if (label == neighborLabel)
		return 0.0;

	// Protect both images from being evicted from the cache while we sample
	set<string> untouchable_images;
	untouchable_images.insert( label->GetUndistortedImagePath() );
	untouchable_images.insert( neighborLabel->GetUndistortedImagePath() );

	// Get the image data
	const Mat *image = MBImageManager::Get().GetImage( label->GetUndistortedImagePath(), imagePyramidLevel, &untouchable_images );
	const Mat *neighbor_image = MBImageManager::Get().GetImage( neighborLabel->GetUndistortedImagePath(), imagePyramidLevel, &untouchable_images );

	assert( image && image->data && neighbor_image && neighbor_image->data );

	// Calculate the number of necessary samples, so that every pixel is sampled along the edge
	MBuint num_samples;
	
	if (numSamples == -1)
	{
		num_samples	= static_cast<MBuint>( ceil( max( image->rows * abs( first_triangle_integral_coords_from_to[0].mV - first_triangle_integral_coords_from_to[1].mV ), 
				image->cols * abs( first_triangle_integral_coords_from_to[0].mU - first_triangle_integral_coords_from_to[1].mU ) ) ) ) + 1u;

		num_samples = max( num_samples, static_cast<MBuint>( ceil( max( image->rows * abs( second_triangle_integral_coords_from_to[0].mV - second_triangle_integral_coords_from_to[1].mV ), 
			image->cols * abs( second_triangle_integral_coords_from_to[0].mU - second_triangle_integral_coords_from_to[1].mU ) ) ) ) + 1u );
	}
	else
	{
		num_samples = numSamples;
	}

	double red;
	double green;
	double blue;

	double neighbor_red;
	double neighbor_green;
	double neighbor_blue;

	// Sampling positions and per-step increments along the shared edge.
	// NOTE(review): if num_samples is 1 (degenerate edge shorter than one
	// pixel, or numSamples == 1 passed in) the divisor below is 0 -
	// presumably prevented by the IsInfinitelyThin() check; verify.
	MBTextureCoordinate coordinate( first_triangle_integral_coords_from_to[0] );
	MBTextureCoordinate increment( (first_triangle_integral_coords_from_to[1] - first_triangle_integral_coords_from_to[0]) / static_cast<double>(num_samples - 1u) );

	MBTextureCoordinate neighbor_coordinate( second_triangle_integral_coords_from_to[0] );
	MBTextureCoordinate neighbor_increment( (second_triangle_integral_coords_from_to[1] - second_triangle_integral_coords_from_to[0]) / static_cast<double>(num_samples - 1u) );

	double sum_euclidean_distances = 0.0;
	double delta_red;
	double delta_green;
	double delta_blue;

	// Calculate the integral
	for (MBuint i = 0; i < num_samples; i++)
	{
		GetColorValueFromImage( image, coordinate, &red, &green, &blue, filteringMode );
		GetColorValueFromImage( neighbor_image, neighbor_coordinate, &neighbor_red, &neighbor_green, &neighbor_blue, filteringMode );

		delta_red = red - neighbor_red;
		delta_green = green - neighbor_green;
		delta_blue = blue - neighbor_blue;
		// The square-root is absolutely essentially to be a metric, v_a_a + v_b_c <= v_a_c + v_b_a
		sum_euclidean_distances += sqrt( delta_red * delta_red + delta_green * delta_green + delta_blue * delta_blue );

		coordinate += increment;
		neighbor_coordinate += neighbor_increment;
	}

	// Average color distance, plus a small epsilon to keep the metric
	// property under floating-point rounding (see ROUNDING_ERROR_COMPENSATION)
	return (sum_euclidean_distances / num_samples) + ROUNDING_ERROR_COMPENSATION;
}

/**
  *	Returns a boolean value indicating whether this triangle is infinitely thin
  * or not
  *
  * @return boolean value indicating whether this triangle is infinitely thin or not
  */
bool MBTriangle::IsInfinitelyThin( void ) const
{
	return mInfinitelyThin;
}

/**
  * Returns true if this triangle is entirely inside the given camera view and 
  * it is seen from the front, false otherwise.
  *
  * Tests, in order: all vertices in front of the camera, all projected points
  * inside the image (after the view's texcoord shift), all projected points
  * inside the optional circular used-image-area, and no vertex occluded
  * according to the distance map.
  *
  * @param cameraView camera view that is used to project this MBTriangle into it
  * @param distanceMap distance map used for the occlusion test
  * @return true if this triangle is entirely inside the given camera view, false otherwise
  */
bool MBTriangle::IsTriangleCompletelyInside( const MBCameraView &cameraView, MBDistanceMap *distanceMap ) const
{
	const double *current_vertex_pos = 0;

	vector<MBVec4> vertices;
	vector<MBVec2> image_points;

	double z_viewspace;

	const MBCameraModel *camera_model = cameraView.GetCameraModel();

	vertices.reserve( NUM_VERTICES_PER_TRIANGLE );
	image_points.reserve( NUM_VERTICES_PER_TRIANGLE );

	// All vertices must be in front of the camera (negative view-space z)
	for (MBuint i = 0; i < NUM_VERTICES_PER_TRIANGLE; i++)
	{
		current_vertex_pos = GetVertexPosition( i );

		vertices.push_back( MBVec4( current_vertex_pos[0], current_vertex_pos[1], current_vertex_pos[2], 1.0 ) );
		z_viewspace = (cameraView.GetWorldSpaceToViewSpaceTransformation() * MBVec4( current_vertex_pos[0], current_vertex_pos[1], current_vertex_pos[2], 1.0 ))[Z];

		if (z_viewspace >= 0.0)
		{
			// The point is behind the camera, and can therefore not be seen
			return false;
		}
	}

	// Project the points onto the image Plane
	ProjectPoints(  vertices,
		cameraView.GetWorldSpaceToViewSpaceTransformation(),
		camera_model->GetCameraMatrix(),
		&image_points,
		camera_model );

	const double X_MIN = 0.0;
	const double Y_MIN = 0.0;
	const double X_MAX = static_cast<double>( camera_model->GetIntrinsicParameters().mImageWidthInPixels );
	const double Y_MAX = static_cast<double>( camera_model->GetIntrinsicParameters().mImageHeightInPixels );

	// Apply the view's texture coordinate shift (given as a fraction of the
	// image size) to the projected pixel coordinates
	const MBVec2 &shift = cameraView.GetTexcoordShiftVector();

	if (shift[X] != 0.0 || shift[Y] != 0.0)
	{
		for (vector<MBVec2>::iterator point_it = image_points.begin();
			point_it != image_points.end(); point_it++)
		{
			(*point_it)[X] += shift[X] * X_MAX;
			(*point_it)[Y] += shift[Y] * Y_MAX;
		}
	}

	// Check, whether the image points are all inside the full image
	for (vector<MBVec2>::const_iterator point_it = image_points.begin();
		point_it != image_points.end(); point_it++)
	{
		if ((*point_it)[X] < X_MIN || (*point_it)[X] >= X_MAX ||
			(*point_it)[Y] < Y_MIN || (*point_it)[Y] >= Y_MAX)
			return false;
	}

	// Optional circular used-image-area restriction around the image center
	if (cameraView.GetUsedImageAreaRadius() > 0.0)
	{
		const double MIDPOINT_X = X_MAX * 0.5;
		const double MIDPOINT_Y = Y_MAX * 0.5;
		const double SHORTER_SIDE =
			MIDPOINT_X < MIDPOINT_Y ? MIDPOINT_X : MIDPOINT_Y;

		const double MAX_DISTANCE =
			(cameraView.GetUsedImageAreaRadius() * SHORTER_SIDE);

		const double MAX_DISTANCE_SQUARED = MAX_DISTANCE * MAX_DISTANCE;

		double delta_x;
		double delta_y;
		double distance_squared;

		// Check, whether the image points are all inside the set radius
		for (vector<MBVec2>::const_iterator point_it = image_points.begin();
			point_it != image_points.end(); point_it++)
		{
			delta_x = (*point_it)[X] - MIDPOINT_X;
			delta_y = (*point_it)[Y] - MIDPOINT_Y;
			distance_squared = delta_x * delta_x + delta_y * delta_y;

			if (distance_squared > MAX_DISTANCE_SQUARED)
				return false;
		}
	}
	
	// Check, whether the triangle is occluded

	// Look into Distance Map here
	distanceMap->Update( &cameraView );

	for (MBuint i = 0; i < NUM_VERTICES_PER_TRIANGLE; i++)
	{
		current_vertex_pos = GetVertexPosition( i );
		z_viewspace = (cameraView.GetWorldSpaceToViewSpaceTransformation() * MBVec4( current_vertex_pos[0], current_vertex_pos[1], current_vertex_pos[2], 1.0 ))[Z];

		// Normalized texture coordinates for the distance map lookup
		double u = image_points[i][X] / X_MAX;
		double v = image_points[i][Y] / Y_MAX;

		if (z_viewspace < distanceMap->GetDepthAtCoord( u, v ))
		{
			// The point is occluded
			return false;
		}
	}

// 	if (useOctree)
// 	{
// 		MBTimer octree_timer;
// 
// 		
// 		if (mOctree->IsOtherTriangleIntersectingSightFrustum( mVertexIndices, cameraView.GetPosition() ))
// 		{
// 			// This MBTriangle can not be seen by the cameraView, because there is
// 			// another MBTriangle between the viewing plane and this MBTriangle
// 			ms_AccumTimeForOcclusionDetectionSeconds += octree_timer.Toc();
// 			return false;
// 		}
// 		ms_AccumTimeForOcclusionDetectionSeconds += octree_timer.Toc();
// 	}

	return true;
}

/** Generate all points between two given points by linear interpolation.
 *
 *  Steps in integer increments along the major axis (the axis on which the
 *  two points differ the most), appending one point per step with the minor
 *  coordinate truncated to an integer. Afterwards the whole vector is sorted
 *  with MBVec2::compareMBVec2.
 *
 *  @param[in,out] points vector containing two points, to which the
 *                 interpolated points are appended; sorted on return
 */
void MBTriangle::linearInterpolation(std::vector<MBVec2> *points) const
{
	// Copy the endpoint coordinates up front; push_back below may reallocate
	const double a_x = points->at(0)[X];
	const double a_y = points->at(0)[Y];
	const double b_x = points->at(1)[X];
	const double b_y = points->at(1)[Y];

	double p0_x, p0_y, p1_x, p1_y;

	// check on which axis the points differ the most
	// BUG FIX: use std::abs explicitly - the previous unqualified abs could
	// bind to the C library's int overload (depending on included headers),
	// silently truncating the double differences before comparison.
	if( std::abs(a_x - b_x) > std::abs(a_y - b_y) )
	{
		// step along x axis, from the smaller to the larger x
		if (a_x < b_x) {
			p0_x = a_x; p0_y = a_y;
			p1_x = b_x; p1_y = b_y;
		} else {
			p0_x = b_x; p0_y = b_y;
			p1_x = a_x; p1_y = a_y;
		}

		// slope dy/dx of the edge
		double formula_part = (p1_y-p0_y)/(p1_x-p0_x);

		for(int x=static_cast<int>(p0_x+1); x<p1_x; x++)
		{
			// truncate to an integer pixel coordinate, like the endpoints
			double y = p0_y + formula_part * (x-p0_x);
			y = static_cast<int>(y);
			points->push_back(MBVec2(x,y));
		}
	}
	else
	{
		// step along y axis, from the smaller to the larger y
		if (a_y < b_y) {
			p0_x = a_x; p0_y = a_y;
			p1_x = b_x; p1_y = b_y;
		} else {
			p0_x = b_x; p0_y = b_y;
			p1_x = a_x; p1_y = a_y;
		}

		// inverse slope dx/dy of the edge
		double formula_part = (p1_x-p0_x)/(p1_y-p0_y);

		for(int y=static_cast<int>(p0_y+1); y<p1_y; y++)
		{
			double x = p0_x + formula_part * (y-p0_y);
			x = static_cast<int>(x);
			points->push_back(MBVec2(x,y));
		}
	}

	// sort points (stable, so equal points keep their insertion order)
	std::stable_sort(points->begin(),points->end(),MBVec2::compareMBVec2); 
}


/** Get the projected points of the triangle.
 *  @param[out] image_points returns the projected points of the triangle or an empty vector if the triangle is not fully in front of
 *  the camera
 *  @param cameraView camera view used for the projection
 */
void MBTriangle::GetProjectedPoints(std::vector<MBVec2> &image_points, const MBCameraView &cameraView) const
{
	const double *current_vertex_pos = 0;

	vector<MBVec4> vertices;

	double z_viewspace;

	const MBCameraModel *camera_model = cameraView.GetCameraModel();

	vertices.reserve( NUM_VERTICES_PER_TRIANGLE );
	//image_points.reserve( NUM_VERTICES_PER_TRIANGLE );

	// All vertices must be in front of the camera; image_points is only
	// filled after this loop, so an early return leaves it empty
	for (MBuint i = 0; i < NUM_VERTICES_PER_TRIANGLE; i++)
	{
		current_vertex_pos = GetVertexPosition( i );

		vertices.push_back( MBVec4( current_vertex_pos[0], current_vertex_pos[1], current_vertex_pos[2], 1.0 ) );
		z_viewspace = (cameraView.GetWorldSpaceToViewSpaceTransformation() * MBVec4( current_vertex_pos[0], current_vertex_pos[1], current_vertex_pos[2], 1.0 ))[Z];

		if (z_viewspace >= 0.0)
		{
			// The point is behind the camera, and can therefore not be seen
			return;
		}
	}

	// Project the points onto the image Plane
	ProjectPoints(  vertices,
		cameraView.GetWorldSpaceToViewSpaceTransformation(),
		camera_model->GetCameraMatrix(),
		&image_points,
		camera_model );

	const double X_MIN = 0.0;
	const double Y_MIN = 0.0;
	const double X_MAX = static_cast<double>( camera_model->GetIntrinsicParameters().mImageWidthInPixels );
	const double Y_MAX = static_cast<double>( camera_model->GetIntrinsicParameters().mImageHeightInPixels );

	const MBVec2 &shift = cameraView.GetTexcoordShiftVector();

	// Apply the view's texcoord shift (fraction of the image size) in pixels.
	// NOTE(review): the truncation to integer coordinates below only happens
	// when a shift is present - with a zero shift the points keep their
	// fractional coordinates. Presumably intentional, but looks inconsistent;
	// verify against the boundary rasterization in linearInterpolation().
	if (shift[X] != 0.0 || shift[Y] != 0.0)
	{
		for (vector<MBVec2>::iterator point_it = image_points.begin();
			point_it != image_points.end(); point_it++)
		{
			(*point_it)[X] += shift[X] * X_MAX;
			(*point_it)[Y] += shift[Y] * Y_MAX;

			(*point_it)[X] = static_cast<int>((*point_it)[X]);
			(*point_it)[Y] = static_cast<int>((*point_it)[Y]);
		}
	}

}


/** get all points of the projected triangle boundary 
 *  @param[out] triangleBoundary is a list of points of the projected triangle boundary
 */
void MBTriangle::GetProjectedTriangleBoundary(std::list<MBVec2> &triangleBoundary, const MBCameraView &cameraView) const
{
	vector<MBVec2> projectedPoints;
	projectedPoints.reserve( NUM_VERTICES_PER_TRIANGLE );

	// get the triangle points projected onto the image plane
	GetProjectedPoints(projectedPoints,cameraView);

	// Triangle not fully in front of the camera: no boundary to compute.
	if( projectedPoints.empty() )
		return;

	// Vertex index pairs forming the three edges of the triangle.
	const MBuint edge_vertex_indices[NUM_VERTICES_PER_TRIANGLE][NUM_VERTICES_PER_EDGE] =
		{ { 0u, 1u }, { 1u, 2u }, { 0u, 2u } };

	vector<MBVec2> triangleEdge;

	for (MBuint edge = 0; edge < NUM_VERTICES_PER_TRIANGLE; edge++)
	{
		triangleEdge.clear();

		triangleEdge.push_back( projectedPoints[ edge_vertex_indices[edge][0] ] );
		triangleEdge.push_back( projectedPoints[ edge_vertex_indices[edge][1] ] );

		// Rasterize the edge into the individual boundary points between its endpoints.
		linearInterpolation( &triangleEdge );

		triangleBoundary.insert( triangleBoundary.end(), triangleEdge.begin(), triangleEdge.end() );
	}

	// BUGFIX: std::stable_sort requires random-access iterators and does not
	// compile with std::list's bidirectional iterators. std::list::sort is the
	// correct choice and is stable as well.
	triangleBoundary.sort( MBVec2::compareMBVec2 );
	triangleBoundary.unique();
}

/** Check if the projected points of the triangle lie inside the image mask 
 *  @return returns true if the projected triangle is completely inside the mask, otherwise false
 */
bool MBTriangle::IsTriangleInsideMask(const MBCameraView &cameraView) const
{
	std::list<MBVec2> triangleBoundary;
	const cv::Mat *mask;

	// get the boundary points of the projected triangle (sorted and deduplicated
	// by GetProjectedTriangleBoundary)
	GetProjectedTriangleBoundary(triangleBoundary, cameraView);

	// Empty boundary means the triangle is not fully in front of the camera.
	if( triangleBoundary.empty() )
		return false;

	bool same_col = false;
	
	// Thin out the boundary: within each run of points sharing the same x column,
	// keep only the first and the last point, so that each column is represented
	// by at most a pair of points spanning it vertically.
	// NOTE(review): prev_point_it is advanced manually in the body while the
	// for-statement only advances point_it; the two stay one apart except right
	// after an erase — verify they can never cross.
	for (std::list<MBVec2>::iterator prev_point_it = triangleBoundary.begin(), point_it = ++triangleBoundary.begin(); 
		 point_it != triangleBoundary.end(); ++point_it)
	{

		if( (*prev_point_it)[X] == (*point_it)[X] )
		{
			if( !same_col ){
				// First repeated x in this column: keep the run's first point.
				same_col = true;
				prev_point_it++;
			}
			else{
				// Interior point of the run: remove it. erase() returns the
				// iterator to the following element, keeping prev one behind point.
				prev_point_it = triangleBoundary.erase(prev_point_it);
			}
		}else{
			same_col = false;
			prev_point_it++;
		}


	}

	mask = MBImageManager::Get().GetImage( cameraView.GetMaskPath() );
	int x,y;

	// Walk the thinned boundary pairwise: two consecutive points with equal x
	// delimit a vertical span of that column; a point whose successor has a
	// different x (or no successor) is checked as a single pixel. The triangle
	// counts as inside only if every sampled mask pixel is pure white (255,255,255).
	for (std::list<MBVec2>::iterator prev_point_it = triangleBoundary.begin(), point_it = ++triangleBoundary.begin(); 
		 prev_point_it != triangleBoundary.end(); ++point_it, ++prev_point_it)
	{
		if( point_it == triangleBoundary.end() || (*prev_point_it)[X] != (*point_it)[X] )
		{
			// Lone point in this column: test just its pixel.
			x = static_cast<int>((*prev_point_it)[X]);
			y = static_cast<int>((*prev_point_it)[Y]);

			if( static_cast<int>((*mask).at<cv::Vec3b>(y,x)[0]) != 255 || 
				static_cast<int>((*mask).at<cv::Vec3b>(y,x)[1]) != 255 || 
				static_cast<int>((*mask).at<cv::Vec3b>(y,x)[2]) != 255 ) 
				return false;

			if( point_it == triangleBoundary.end() )
				break;

		}else{
			// Pair spanning the column: test every pixel of the vertical span.
			// NOTE(review): assumes (*prev_point_it)[Y] <= (*point_it)[Y], i.e.
			// MBVec2::compareMBVec2 orders by x then ascending y — TODO confirm.
			int y_end;
			x = static_cast<int>((*prev_point_it)[X]);

			for( y=static_cast<int>((*prev_point_it)[Y]), y_end=static_cast<int>((*point_it)[Y]); y<=y_end; y++ )
			{
				if( static_cast<int>((*mask).at<cv::Vec3b>(y,x)[0]) != 255 || 
					static_cast<int>((*mask).at<cv::Vec3b>(y,x)[1]) != 255 || 
					static_cast<int>((*mask).at<cv::Vec3b>(y,x)[2]) != 255) {
					return false;
				}

			}
			// Both points of the pair are consumed: advance once here, the
			// for-statement's increments skip past the second point.
			++prev_point_it;
			++point_it;

			if( point_it == triangleBoundary.end() )
				break;
		}
		
	}
	
	// Every sampled boundary pixel lies on white mask area.
	return true;
}

/** check if all given points are inside the image mask 
 * @param[in] points, points of the image
 * @param[in] cameraView, object is used to get the image mask
 * @return returns true if all points are inside the mask, else false
 */
bool MBTriangle::PointsInsideMask(const std::vector<MBVec2> &points, const MBCameraView &cameraView)
{
	const cv::Mat *mask = MBImageManager::Get().GetImage( cameraView.GetMaskPath() );
	int x,y;

	for (std::vector<MBVec2>::const_iterator point_it = points.begin(); point_it != points.end(); ++point_it)
	{
		x = static_cast<int>((*point_it)[X]);
		y = static_cast<int>((*point_it)[Y]);

		if( static_cast<int>((*mask).at<cv::Vec3b>(y,x)[0]) != 255 || 
			static_cast<int>((*mask).at<cv::Vec3b>(y,x)[1]) != 255 || 
			static_cast<int>((*mask).at<cv::Vec3b>(y,x)[2]) != 255 ) 
			return false;
				
	}

	return true;
}

/** get the projected points along an edge, given by two vertices 
 * @param[in] points, vector that holds the two triangle vertices of the edge
 * @param[out] projectedEdge, all projected points along the triangle edge
 * @param[in] cameraView, camera view to project the vertices
 *
void MBTriangle::GetProjectedEdge(const std::vector<MBVec3> &points, std::vector<MBVec2> &projectedEdge, const MBCameraView &cameraView)
{
}*/

/** check if the projected edge of the triangle, given by two of the triangle's vertex indices, lies inside the image mask 
 * @param[in] indexVertex1, index of one triangle vertex
 * @param[in] indexVertex2, index of one triangle vertex
 * @param[in] cameraView, to project the vertices. the object is also used to get the image mask
 * @return returns true if all points along the projected triangle edge are inside the image mask, else false
 */
bool  MBTriangle::IsEdgeInsideMask(const MBuint &indexVertex1, const MBuint &indexVertex2, const MBCameraView &cameraView) const
{
	vector<MBVec2> projectedEdge;
	vector<MBVec2> projectedTrianglePoints;

	projectedTrianglePoints.reserve(NUM_VERTICES_PER_TRIANGLE);

	// get the projected triangle points
	GetProjectedPoints(projectedTrianglePoints,cameraView);

	// get the projected triangle points of the edge
	projectedEdge.push_back(projectedTrianglePoints[indexVertex1]);
	projectedEdge.push_back(projectedTrianglePoints[indexVertex2]);

	// calculate the points along the edge
	MBTriangle::linearInterpolation(&projectedEdge);

	// check if all points on the edge are inside the mask
	return MBTriangle::PointsInsideMask(projectedEdge,cameraView);
}

/** get the ratio of the triangle area to the projected triangle area in square millimeters per square texel
 * @param[in] cameraView, used to calculate the projected triangle
 * @return ratio of the triangle area (converted to square millimeters) to the projected triangle area in texels
 */
double MBTriangle::GetRealAreaPerProjectedArea(const MBCameraView &cameraView) const
{
	MBVec3 imgCrossProduct;
	MBVec3 modelCrossProduct;

	vector<MBVec3> modelTriangle;
	vector<MBVec2> imgTriangle;

	double projectedArea, realArea;

	modelTriangle.reserve(NUM_VERTICES_PER_TRIANGLE);
	imgTriangle.reserve(NUM_VERTICES_PER_TRIANGLE);

	const double *current_vertex_pos = 0;

	// ---- Back-face test: a triangle seen from behind gets the infinity weight ----

	const MBVec3 &camera_pos = cameraView.GetPosition();

	// Center of mass of the three vertices (three coordinates: x, y, z).
	double center_of_mass[NUM_VERTICES_PER_TRIANGLE]  = { 0.0, 0.0, 0.0 };

	for (MBuint i = 0; i < NUM_VERTICES_PER_TRIANGLE; i++)
	{
		current_vertex_pos = GetVertexPosition( i );
		
		center_of_mass[X] += current_vertex_pos[X];
		center_of_mass[Y] += current_vertex_pos[Y];
		center_of_mass[Z] += current_vertex_pos[Z];
	}

	center_of_mass[X] /= 3.0;
	center_of_mass[Y] /= 3.0;
	center_of_mass[Z] /= 3.0;

	const double *p_normal = GetNormal();

	cv::Vec3d normal( p_normal[X], p_normal[Y], p_normal[Z] );

	// Normalized vector from the triangle's center of mass towards the camera.
	cv::Vec3d view_vector(
		camera_pos[X] - center_of_mass[X],
		camera_pos[Y] - center_of_mass[Y],
		camera_pos[Z] - center_of_mass[Z] );

	double distance = cv::norm( view_vector );

	view_vector[X] /= distance;
	view_vector[Y] /= distance;
	view_vector[Z] /= distance;

	double dot_product = normal.dot( view_vector );

	if (dot_product <= 0.0)
	{
		// The triangle is looked at from behind
		return MBWeightMethodManager::Get().GetMethodWeightInfinity(MB_REAL_TO_PROJECTED_AREA_RATIO);
	}

	GetProjectedPoints(imgTriangle,cameraView);

	// BUGFIX: GetProjectedPoints leaves the output empty when a vertex lies
	// behind the camera; indexing imgTriangle below would then be out of
	// bounds. Treat this like the back-facing case.
	if (imgTriangle.size() < NUM_VERTICES_PER_TRIANGLE)
	{
		return MBWeightMethodManager::Get().GetMethodWeightInfinity(MB_REAL_TO_PROJECTED_AREA_RATIO);
	}

	for (MBuint i = 0; i < NUM_VERTICES_PER_TRIANGLE; i++)
	{
		current_vertex_pos = GetVertexPosition( i );

		modelTriangle.push_back( MBVec3( current_vertex_pos[0], current_vertex_pos[1], current_vertex_pos[2]) );
	}

	// Each cross product has the magnitude of twice the triangle area; the
	// factor of two cancels in the final ratio.
	modelCrossProduct = MBVec3::Cross( modelTriangle[1]-modelTriangle[0], modelTriangle[2]-modelTriangle[0] );

	// The projected triangle is embedded in the x = 0 plane, so the cross
	// product's magnitude is entirely in its first component.
	imgCrossProduct = MBVec3::Cross( MBVec3(0.0,(imgTriangle[1])[0],(imgTriangle[1])[1]) - MBVec3(0.0,(imgTriangle[0])[0],(imgTriangle[0])[1]),
		                             MBVec3(0.0,(imgTriangle[2])[0],(imgTriangle[2])[1]) - MBVec3(0.0,(imgTriangle[0])[0],(imgTriangle[0])[1]) );

	realArea = cv::sqrt(modelCrossProduct[0]*modelCrossProduct[0] +
		                modelCrossProduct[1]*modelCrossProduct[1] +
						modelCrossProduct[2]*modelCrossProduct[2]);

	// get size of the area in square millimeters instead of square meters
	// (1 m^2 == 1e6 mm^2)
	realArea *= 1000000;

	// BUGFIX: the unqualified abs() may bind to the integer overload and
	// silently truncate the double; take the absolute value explicitly.
	projectedArea = (imgCrossProduct[0] < 0.0) ? -imgCrossProduct[0] : imgCrossProduct[0];

	// BUGFIX: a degenerate (zero-area) projection would divide by zero; report
	// the infinity weight instead.
	if (projectedArea <= 0.0)
	{
		return MBWeightMethodManager::Get().GetMethodWeightInfinity(MB_REAL_TO_PROJECTED_AREA_RATIO);
	}

	return realArea/projectedArea;
}