/*
 *  polygonizer.cpp
 *  Fracture
 *
 *  Created by Jamie Portsmouth on 08/05/2011.
 *  Copyright 2011 __MyCompanyName__. All rights reserved.
 *
 */


// polygonize junk


#if 0

template<typename Functor>
struct ProjectedEdge
{
	// A projected edge defined by splitting the 3d line segment A-B into numSegments
	// sub-segments, then projecting the endpoints of each sub-segment onto the
	// isosurface defined by Functor.
	//
	// If 'expand' is true, the radius of curvature (r.o.c.) of the surface is also
	// evaluated at each projected point, and the per-segment bounding boxes are
	// inflated by those radii so that "fat" edge-edge proximity queries can be
	// answered via expandedEdgesHit().
	//
	// NB fixes relative to the original junk version:
	//  - the body used project<Func>/rocAtPoint<Func> although the template
	//    parameter is named Functor (did not compile);
	//  - makeExpandedSegmentBox was declared with a spurious 'bool expand'
	//    parameter that no call site passed (did not compile);
	//  - the BVH build special-cased the first/last segments and read
	//    m_interiorProjectedPoints[0] even when numSegments==1 (out of bounds),
	//    and looped to size()-1 on an empty vector (size_t underflow);
	//    all segments are now handled uniformly via getSegmentEndpoints()
	//    (implements the old TO DO);
	//  - the r.o.c. interpolation in expandedEdgesHit() was inverted;
	//  - getSegmentEndpoints() and the hit queries are now const, since they are
	//    invoked on a const 'otherEdge'.
	ProjectedEdge(V3f A, V3f B, bool expand, int numSegments=4) : m_numSegments(numSegments), m_expand(expand)
	{
		assert(numSegments>0);

		V3f AB = B - A;
		float edgeLength = AB.length();
		float segmentLength = edgeLength/float(numSegments);
		AB /= edgeLength;

		// Interior points are the numSegments-1 equally spaced subdivision points of A-B.
		m_interiorProjectedPoints.resize(numSegments-1);
		for (size_t n=0; n<m_interiorProjectedPoints.size(); ++n)
		{
			m_interiorProjectedPoints[n] = A + float(n+1)*segmentLength*AB;
		}

		// Project endpoints and interior points onto the isosurface.
		const int maxIterations = 100;
		float distanceEpsilon = 1.0e-3f * segmentLength;
		m_projectedEndpointA = project<Functor>(A, FLT_EPSILON, distanceEpsilon, maxIterations);
		m_projectedEndpointB = project<Functor>(B, FLT_EPSILON, distanceEpsilon, maxIterations);
		for (size_t n=0; n<m_interiorProjectedPoints.size(); ++n)
		{
			m_interiorProjectedPoints[n] = project<Functor>(m_interiorProjectedPoints[n], FLT_EPSILON, distanceEpsilon, maxIterations);
		}

		if (expand)
		{
			// m_radiusOfCurvatures[n] is the r.o.c. at the start of segment n;
			// index numSegments holds the r.o.c. at the far endpoint B, so segment n
			// spans r.o.c. entries [n] and [n+1].
			m_radiusOfCurvatures.resize(numSegments+1);
			m_radiusOfCurvatures[0] = rocAtPoint<Functor>(m_projectedEndpointA);
			m_radiusOfCurvatures[numSegments] = rocAtPoint<Functor>(m_projectedEndpointB);
			for (size_t n=0; n<m_interiorProjectedPoints.size(); ++n)
			{
				m_radiusOfCurvatures[n+1] = rocAtPoint<Functor>(m_interiorProjectedPoints[n]);
			}
		}

		// Build segment BVH.  All segments (including first and last) are handled
		// uniformly, which is also correct for numSegments==1.
		vector<Box3f> leafAabbs;
		vector<int> leafDatas;
		for (int n=0; n<numSegments; ++n)
		{
			V3f E0, E1;
			getSegmentEndpoints(n, E0, E1);

			Box3f segmentBox;
			if (expand)
			{
				segmentBox = makeExpandedSegmentBox(E0, E1, m_radiusOfCurvatures[n], m_radiusOfCurvatures[n+1]);
			}
			else
			{
				segmentBox = makeSegmentBox(E0, E1);
			}
			leafAabbs.push_back(segmentBox);
			leafDatas.push_back(n);
		}
		m_segmentBVH.buildTreeTopDown(leafAabbs, leafDatas);
	}

	// Axis-aligned box of segment A-B, inflated at endpoint A by radius rA and at
	// endpoint B by radius rB.
	static inline Box3f makeExpandedSegmentBox(const V3f& A, const V3f& B, float rA, float rB)
	{
		Box3f box, boxA, boxB;
		box.makeEmpty();
		V3f vA(rA), vB(rB);
		boxA.extendBy(A);
		boxB.extendBy(B);
		boxA.max += vA;
		boxA.min -= vA;
		boxB.max += vB;
		boxB.min -= vB;
		box.extendBy(boxA);
		box.extendBy(boxB);
		return box;
	}

	// Tight axis-aligned box of segment A-B.
	static inline Box3f makeSegmentBox(const V3f& A, const V3f& B)
	{
		Box3f box;
		box.makeEmpty();
		box.extendBy(A);
		box.extendBy(B);
		return box;
	}

	// Endpoints of segment segmentIndex in [0, m_numSegments).
	// Segment 0 starts at the projected A endpoint; segment m_numSegments-1 ends at
	// the projected B endpoint; all other endpoints are interior projected points.
	void getSegmentEndpoints(int segmentIndex, V3f& E0, V3f& E1) const
	{
		assert(segmentIndex>=0 && segmentIndex<m_numSegments);
		E0 = (segmentIndex == 0)               ? m_projectedEndpointA : m_interiorProjectedPoints[segmentIndex-1];
		E1 = (segmentIndex == m_numSegments-1) ? m_projectedEndpointB : m_interiorProjectedPoints[segmentIndex];
	}

	// Squared distance between the closest points of segments E0_sta-E0_end and
	// E1_sta-E1_end.  On return s and t are the closest-point parameters in [0,1]
	// along the first and second segments respectively, and c1, c2 are the closest
	// points themselves.  (Closest-point-of-two-segments routine from Ericson,
	// "Real-Time Collision Detection"; operator^ is the Imath dot product.)
	static inline float edgeEdgeDistanceSqr(const V3f& E0_sta, const V3f& E0_end, const V3f& E1_sta, const V3f& E1_end,
											float &s, float&t, V3f& c1, V3f& c2)
	{
		// From Ericson.
		const Imath::V3f& p1 = E0_sta;
		const Imath::V3f& q1 = E0_end;
		const Imath::V3f& p2 = E1_sta;
		const Imath::V3f& q2 = E1_end;
		V3f d1 = q1 - p1; // Direction vector of segment S1
		V3f d2 = q2 - p2; // Direction vector of segment S2
		V3f r = p1 - p2;
		float a = d1 ^ d1; // Squared length of segment S1, always nonnegative
		float e = d2 ^ d2; // Squared length of segment S2, always nonnegative
		float f = d2 ^ r;

		if (a<=FLT_EPSILON && e<=FLT_EPSILON)
		{	// Both segments degenerate into points
			s = t = 0.0f;
			c1 = p1;
			c2 = p2;
			return (c1-c2)^(c1-c2);
		}

		if (a<=FLT_EPSILON)
		{   // First segment degenerates into a point
			s = 0.0f;
			t = f/e;
			t = clamp(t, 0.0f, 1.0f);
		}
		else
		{
			float c = d1^r;
			if (e<=FLT_EPSILON)
			{   // Second segment degenerates into a point
				t = 0.0f;
				s = clamp(-c/a, 0.0f, 1.0f);
			}
			else
			{   // The general nondegenerate case starts here
				float b = d1^d2;
				float denom = a*e - b*b;

				// If segments non parallel, compute closest point on L1 to L2 and
				// clamp to segment S1. Else pick arbitrary s (here 0)
				if (denom != 0.0f)
					s = clamp((b*f - c*e)/denom, 0.0f, 1.0f);
				else
					s = 0.0f;

				// Compute point on L2 closest to S1(s)
				t = (b*s + f)/e;

				// If t in [0,1] done. Else clamp t, recompute s for the new value of t
				// and clamp s to [0,1]
				if (t<0.0f)
				{
					t = 0.0f;
					s = clamp(-c/a, 0.0f, 1.0f);
				}
				else if (t>1.0f)
				{
					t = 1.0f;
					s = clamp((b-c)/a, 0.0f, 1.0f);
				}
			}
		}
		c1 = p1 + d1*s;
		c2 = p2 + d2*t;
		return (c1-c2)^(c1-c2);
	}

	// Returns true if the closest points of the segments of the two ProjectedEdges,
	// expanded by their radii of curvature, overlap.
	bool expandedEdgesHit(const ProjectedEdge& otherEdge) const
	{
		assert(m_expand && otherEdge.m_expand);

		// Broadphase: collide the two segment BVHs.
		vector<AabbTreeUtils::NodePair> leafPairs;
		AabbTreeUtils::collideTrees<int, int>(m_segmentBVH, otherEdge.m_segmentBVH, leafPairs);

		for (size_t i=0; i<leafPairs.size(); ++i)
		{
			AabbTreeUtils::NodePair& leafPair = leafPairs[i];

			// Narrowphase
			int segmentIndex = m_segmentBVH.getLeafData(m_segmentBVH.getNode(leafPair.m_a).m_leafIndex);
			int segmentIndexOther = otherEdge.m_segmentBVH.getLeafData(otherEdge.m_segmentBVH.getNode(leafPair.m_b).m_leafIndex);
			assert(segmentIndex>=0 && segmentIndex<m_numSegments);
			assert(segmentIndexOther>=0 && segmentIndexOther<otherEdge.m_numSegments);

			V3f E0_sta, E0_end;
			getSegmentEndpoints(segmentIndex, E0_sta, E0_end);

			V3f E1_sta, E1_end;
			otherEdge.getSegmentEndpoints(segmentIndexOther, E1_sta, E1_end);

			float s, t;
			V3f c1, c2;
			float eeDistSqr = edgeEdgeDistanceSqr(E0_sta, E0_end, E1_sta, E1_end, s, t, c1, c2);

			float E0_sta_r = m_radiusOfCurvatures[segmentIndex];
			float E0_end_r = m_radiusOfCurvatures[segmentIndex+1];

			float E1_sta_r = otherEdge.m_radiusOfCurvatures[segmentIndexOther];
			float E1_end_r = otherEdge.m_radiusOfCurvatures[segmentIndexOther+1];

			// Linearly interpolate the endpoint radii at the closest-point parameters.
			// (s=0 lies at the segment start, so the start radius gets weight 1-s;
			// the original had these weights swapped.)
			float E0_interp_r = (1.f-s)*E0_sta_r + s*E0_end_r;
			float E1_interp_r = (1.f-t)*E1_sta_r + t*E1_end_r;

			// Hit if the closest-point distance is less than the sum of the radii.
			float sumR = E0_interp_r + E1_interp_r;
			if ( eeDistSqr < sumR*sumR )
			{
				return true;
			}
		}

		return false;
	}

	// Returns true if the closest points of the segments of the two ProjectedEdges
	// are closer than the tolerance.
	bool edgesHit(const ProjectedEdge& otherEdge, float distanceTolerance) const
	{
		// Broadphase: collide the two segment BVHs.
		vector<AabbTreeUtils::NodePair> leafPairs;
		AabbTreeUtils::collideTrees<int, int>(m_segmentBVH, otherEdge.m_segmentBVH, leafPairs);

		float distanceToleranceSqr = distanceTolerance*distanceTolerance;

		for (size_t i=0; i<leafPairs.size(); ++i)
		{
			AabbTreeUtils::NodePair& leafPair = leafPairs[i];

			// Narrowphase
			int segmentIndex = m_segmentBVH.getLeafData(m_segmentBVH.getNode(leafPair.m_a).m_leafIndex);
			int segmentIndexOther = otherEdge.m_segmentBVH.getLeafData(otherEdge.m_segmentBVH.getNode(leafPair.m_b).m_leafIndex);
			assert(segmentIndex>=0 && segmentIndex<m_numSegments);
			assert(segmentIndexOther>=0 && segmentIndexOther<otherEdge.m_numSegments);

			V3f E0_sta, E0_end;
			getSegmentEndpoints(segmentIndex, E0_sta, E0_end);

			V3f E1_sta, E1_end;
			otherEdge.getSegmentEndpoints(segmentIndexOther, E1_sta, E1_end);

			float s, t;
			V3f c1, c2;
			float eeDistSqr = edgeEdgeDistanceSqr(E0_sta, E0_end, E1_sta, E1_end, s, t, c1, c2);

			if (eeDistSqr < distanceToleranceSqr)
			{
				return true;
			}
		}
		return false;
	}

private:

	V3f m_projectedEndpointA;
	V3f m_projectedEndpointB;
	vector<V3f> m_interiorProjectedPoints;   // numSegments-1 subdivision points, projected

	int m_numSegments;
	bool m_expand;

	vector<float> m_radiusOfCurvatures;      // numSegments+1 entries when m_expand, else empty

	AabbTree<int> m_segmentBVH;              // one leaf per segment; leaf data = segment index

};




#endif



#if 0 

	// polygonizer3 junk


// Opening angle at a front vertex: the angle between the incoming edge
// (Vprev -> V) and the outgoing edge (V -> Vnext), measured in the tangent
// plane of the isosurface at V.  Returns a value in (0, 2*pi): less than pi
// for an acute (convex) corner, greater than pi for a reflex corner.
float computeOpeningAngle(list<Front::Vertex>::const_iterator vertex, Front& front)
{
	// Previous/next vertices on the front, treating the vertex list as circular.
	// NB: the original decremented begin() / incremented end() (both undefined
	// behavior for std::list iterators); wrap explicitly instead.
	list<Front::Vertex>::const_iterator prevVertex = vertex;
	if (prevVertex == front.m_vertices.begin()) prevVertex = front.m_vertices.end();
	prevVertex--;

	list<Front::Vertex>::const_iterator nextVertex = vertex;
	nextVertex++;
	if (nextVertex == front.m_vertices.end()) nextVertex = front.m_vertices.begin();

	const V3f& V = m_mesh.m_vertices[vertex->m_index];
	const V3f& N = m_mesh.m_vertexNormals[vertex->m_index];

	const V3f& Vprev = m_mesh.m_vertices[prevVertex->m_index];
	const V3f& Vnext = m_mesh.m_vertices[nextVertex->m_index];

	// Orthonormal tangent basis (X, Y) at V, perpendicular to the normal N.
	V3f X, Y;
	computeTangentSpace(N, X, Y);

	// Project the incoming edge direction into the tangent plane (operator^ is dot).
	V3f Vprev_project(0);
	Vprev_project.x = (V - Vprev) ^ X;
	Vprev_project.y = (V - Vprev) ^ Y;
	Vprev_project.normalize();

	// Project the outgoing edge direction into the tangent plane.
	V3f Vnext_project(0);
	Vnext_project.x = (Vnext - V) ^ X;
	Vnext_project.y = (Vnext - V) ^ Y;
	Vnext_project.normalize();

	// Clamp strictly inside [-1,1] so acos stays finite under roundoff.
	float dot = clamp(Vprev_project ^ Vnext_project, -1.f+FLT_EPSILON, 1.f-FLT_EPSILON);

	// The z-component of the 2d cross product (operator%) gives the turn direction.
	float openingAngle;
	if ((Vprev_project % Vnext_project).z >= 0.0f)
	{
			// reflex angle
		openingAngle = M_PI + acos(dot);
	}
	else
	{
			// acute angle
		openingAngle = M_PI - acos(dot);
	}

	return openingAngle;
}


bool growthPhaseTerminated()
{
	list<Front::Vertex>::const_iterator vertex = m_fronts[0].m_vertices.begin();
	while (vertex != m_fronts[0].m_vertices.end())
	{
		if (vertex->m_active) return false;
		vertex++;
	}
	return true;
}


// Convenience wrapper: runs one crack-pasting step on the primary front using
// the stored mesh and tolerance.  Forwards to the three-argument overload;
// returns its result (presumably true when a bridge triangle was added —
// TODO confirm against the full overload).
template<typename Func>
bool pasteCracks()
{
	return pasteCracks<Func>(m_fronts[0], m_mesh, m_tol);
}


// Copy the front's vertices (all expected to be inactive by this phase) into
// sortedFrontVerticesOut and sort them with Front::Vertex's operator<.
void sortVerticesByAngle(Front& front, vector<Front::Vertex>& sortedFrontVerticesOut)
{
	for (list<Front::Vertex>::const_iterator it = front.m_vertices.begin();
		 it != front.m_vertices.end(); ++it)
	{
		assert(!it->m_active);
		sortedFrontVerticesOut.push_back(*it);
	}

	sort(sortedFrontVerticesOut.begin(), sortedFrontVerticesOut.end());

		//vector<Front::Vertex>::const_iterator it = sortedFrontVerticesOut.begin();
		//while (it != sortedFrontVerticesOut.end())
		//{
		//printf("%f ", (180.f/M_PI)*it->m_openingAngle);
		//it++;
		//}
}




/*
 OK - crack pasting sucks. :(
 
 - could try to fix it up, (have a look at karkanis and samir/gannouche).
 Latter looks quite do-able - though perhaps not robust or well-defined enough.
 
 - or go for a multi-front approach.
 
 The latter seems more attractive. It will presumably change the evolution
 of the geometry, possibly for the better, as the cracks go away as the mesh grows.
 Also, we need to deal with multiple fronts anyway, if we want to be able to mesh
 surfaces with disconnected parts, and/or start growth from more than one seed point.
 Leaving the crack repair to the end also just seems uglier, like deferring the main
 problem until the end for no good reason.
 
 
 
 Multi-front approach
 --------------------
 
 ***************************************************************************************
 - when two fronts 'meet', they must be merged.
 - when a given front meets itself, it must be split into two.
 ***************************************************************************************
 
 - (but locally, both situations look the same: like 2 fronts being merged)
 
 - 'meet' means, growth was attempted at a vertex but prevented due to a collision
 - so, 'resolve' the collision by fixing the geometry locally
 
 ***************************************************************************************
 - this ultimately involves connecting a vertex on one front to a vertex on another
 ***************************************************************************************
 
 - the papers just say connect using a distance criterion:
 
 - hartmann,   choose closest pair from the WHOLE front(s)
 - samir et al.   crack paste: choose an edge, find closest vertex 'facing' (?) edge.
 - karkanis:   approach too complex to be practical here
 - araujo:     method is unclear from the paper
 
 None are very good.  
 
 ***************************************************************************************
 Need to check whether the connection is actually valid.
 We can do this just with a regular edge vs. front edge BVH check,  making sure
 to explicitly check for and disallow connections which coincide with an existing mesh edge.
 ***************************************************************************************
 
 - but how to choose candidate connections?
 
 ***************************************************************************************
 Obviously, use original front vertex as one endpoint,
 and take other endpoint as one of the vertices belonging to the edges involved
 in the collision. Try the closest first, and apply edge check to each until we get a winner.
 ***************************************************************************************
 
 
 [What if no winner exists? [i.e. all available connections are invalid].
 We cannot allow this - then the algorithm is stalled.
 
 <devil>
 Hmm... could actually allow insertion of a vertex on the closest edge visible 
 to the front vertex'
 </devil>
 
 This will (almost) always succeed (unless no edge is even visible - ignore that case for now).
 This is a good method! [no need to do insertion if a *vertex* is visible though - 
 which is almost always, presumably]
 
 So just need to understand:
 
 - how to identify the 'closest' edge 'visible' to a front vertex
 - how to do the vertex insertion [need front edge -> mesh triangle map :(]
 ]
 
 However - 'probably' OK to just go with:
 
 ***************************************************************************************
 
 - if no winner exists, just choose the best candidate connection we can
 (probably just the closest).
 
 - actually better to use various metrics of goodness:
 
 - close proximity of connection to other edges is bad
 - if making the connection would create sliver triangles, that is bad
 
 ***************************************************************************************
 
 Then we're guaranteed to be able to continue to completion.
 
 The front 'endgame' may or may not pose problems, not sure. [shouldn't though].
 
 

 */




// One crack-pasting (ear-clipping) step: find the front vertex with the
// smallest opening angle whose bridge edge prev<->next fits, clip it by
// emitting the triangle (vertex, prev, next), remove it from the front, and
// refresh the front BVH and the neighbours' opening angles.
// Returns true if a vertex was clipped, false if no admissible bridge exists
// (or the front has fewer than 3 vertices).
template<typename Functor>
bool pasteCracks(Front& front, Geometry& mesh, float tol)
{
	if (front.m_vertices.size()<3)
	{
		return false;
	}
	/*
	 Work in progress.  The basic idea:

	 - keep a data structure that both tracks the order of vertices on the front
	 and efficiently ranks them by opening angle, e.g. a map:
	 front vertex index -> front vertex list iterator (built when the crack
	 pasting phase starts).

	 - sort vertex indices by opening angle, process the highest ranked
	 (i.e. lowest angle) vertex which admits bridging, find its neighbours,
	 update the list, update their angles and map entries, then delete the
	 vertex from the map and the list.

	 - the "can clip"/"is ear" test is a regular candidateEdgeFits() call for the
	 "bridge" (as during the growth phase), though it should really use an E-E
	 test with a tighter tolerance to allow for the potentially closer approaches
	 of the cracks.  In practice the non-planarity of the front edges (<<ROC) is
	 considerably lower than the distance of approach of front edges, which is of
	 order ROC by construction, so a tolerance of order 0.25*ROC should suffice.

	 NOTE, Araujo et al. expand at the vertex with minimal front angle each step
	 during GROWTH, which is probably good for mesh quality and stability; this
	 also requires the ranking data structure.
	 */

		// Try to clip the vertices with lowest opening angle first.
	vector<Front::Vertex> sortedFrontVertices;
	sortVerticesByAngle(front, sortedFrontVertices);

	int nVertex = 0;
	while (nVertex < (int)sortedFrontVertices.size())
	{
		int frontVertexIndex = sortedFrontVertices[nVertex].m_index;

		list<Front::Vertex>::iterator frontVertex = front.m_vertexIndexToFrontIterator[frontVertexIndex];

			// Neighbours on the (circular) front.  NB: the original decremented
			// begin() / incremented end(), which is undefined behavior; wrap explicitly.
		list<Front::Vertex>::iterator prevVertex = frontVertex;
		if (prevVertex == front.m_vertices.begin()) prevVertex = front.m_vertices.end();
		prevVertex--;

		list<Front::Vertex>::iterator nextVertex = frontVertex;
		nextVertex++;
		if (nextVertex == front.m_vertices.end()) nextVertex = front.m_vertices.begin();

			// Check if the bridge is admissible, i.e. its fat edge fits.
		FatEdge bridge(prevVertex->m_index, nextVertex->m_index, mesh, m_vertexRocs);

		FatEdge hitEdge;
		if ( front.candidateEdgeFits(bridge, hitEdge, mesh, 1.f) )
		{
				// Record the indices BEFORE erasing: the original dereferenced
				// frontVertex after front.m_vertices.erase(frontVertex), which
				// invalidates that iterator (use-after-erase).
			const int clippedIndex = frontVertex->m_index;
			const int prevIndex = prevVertex->m_index;
			const int nextIndex = nextVertex->m_index;

				// Modify the front vertex list.  list::erase only invalidates the
				// erased iterator, so prevVertex/nextVertex remain valid below.
			front.m_vertexIndexToFrontIterator.erase(clippedIndex);
			front.m_vertices.erase(frontVertex);

				// Add the new bridge triangle to the mesh.
			{
				Geometry::Triangle tri;
				tri.m_vertex[0] = clippedIndex;
				tri.m_vertex[1] = prevIndex;
				tri.m_vertex[2] = nextIndex;
				m_mesh.m_triangles.push_back(tri);
			}

				// Rebuild the front BVH
			front.rebuildFatEdgeBVH(mesh, m_vertexRocs);

				// Update the front angles of the bridge ends
			prevVertex->m_openingAngle = computeOpeningAngle(prevVertex, front, mesh);
			nextVertex->m_openingAngle = computeOpeningAngle(nextVertex, front, mesh);

			return true;
		}

		nVertex++;
	}

	return false;
}



// Dead debug scaffolding (never executes: guarded by if (0), and the whole
// region is inside #if 0).  Counts the active vertices on 'front' before
// modification; 'front' here refers to a variable from the function this
// fragment was cut out of.
if (0)
{
	int nActive=0;
	list<Front::Vertex>::iterator iter = front.m_vertices.begin();
	while (iter != front.m_vertices.end())
	{
		if (iter->m_active) nActive++;
		iter++;
	}
	
	/*
	 printf("\n\n###########################\nFront before modification: %d %d\n", front.m_vertices.size(), nActive);
	 list<Front::Vertex>::iterator iter = front.m_vertices.begin();
	 while (iter != front.m_vertices.end())
	 {
	 if (iter->m_active) printf("%d -> ", iter->m_index);
	 iter++;
	 }	
	 */
}



// Dead debug scaffolding (never executes: guarded by if (0), and the whole
// region is inside #if 0).  Would print the front size after modification.
// NOTE(review): %d with a size_t/list::size() argument is technically a
// format mismatch — would need a cast if ever re-enabled.
if (0)
{
	printf("\nFront after modification: %d\n", front.m_vertices.size());
	/*
	 list<Front::Vertex>::iterator iter = front.m_vertices.begin();
	 while (iter != front.m_vertices.end())
	 {
	 printf("%d -> ", iter->m_index);
	 iter++;
	 }
	 printf("\n");
	 */
}

	//####################################################################################################
	//# Almost-2d triangulation
	//####################################################################################################


// Twice the signed area of triangle (a,b,c) in the xy-plane: positive when
// the vertices wind counter-clockwise, negative when clockwise.
inline float area2(const V3f& a, const V3f& b, const V3f& c)
{
	const float abx = b[0] - a[0];
	const float aby = b[1] - a[1];
	const float acx = c[0] - a[0];
	const float acy = c[1] - a[1];
	return abx*acy - acx*aby;
}


// Logical exclusive-or: true when exactly one of x, y is true.
inline bool _xor(bool x, bool y)
{
	return x != y;
}


// True if point c lies strictly to the left of the directed line a->b,
// judged by the sign of the doubled signed area in the xy-plane.
bool left(const V3f& a, const V3f& b, const V3f& c)
{
	return area2(a, b, c) > 0.0f;
}


// True if the two segments (viewed in the xy-plane) properly intersect, i.e.
// each segment's endpoints strictly straddle the line through the other.
// Collinear or endpoint-touching configurations are reported as non-intersecting.
// Fixed: the original declared a float return type for a boolean expression;
// bool is the intended (and backward-compatible) return type.
inline bool segmentsIntersect(const V3f& segment0_start, const V3f& segment0_end,
							  const V3f& segment1_start, const V3f& segment1_end)
{
	return _xor( left(segment0_start, segment0_end, segment1_start), left(segment0_start, segment0_end, segment1_end) ) && 
	       _xor( left(segment1_start, segment1_end, segment0_start), left(segment1_start, segment1_end, segment0_end) );
}





	//####################################################################################################



/*
 
 Vertex based algorithm 2:   [single front here, for simplicity - later extend to multiple disconnected seed fronts]
 
 
 required:
 
 - this algorithm uses edge-edge based collision detection. However, these edges are not line segments in 3d, but 
 line segments in 3d projected onto the 2d isosurface.
 
 - When comparing 3d edges for intersection, we fatten them first, using the radius of curvature of the endpoints.
 This keeps growing front edges from approaching too closely.
 
 - During the crack filling phase, we project all edges within a certain distance of the ear being clipped into 
 a local plane. 
 
 
 GROWING PHASE
 
 - maintain fronts with vertex linked lists.  
 
 - generate candidate vertices from existing front vertices, using:
 
 - the local r.o.c as the edge size heuristic
 - an angle criterion to try to keep triangles equilateral
 
 - overlap detection is edge-based. 
 
 - we rebuild the BVH of the entire set of front edges whenever the front changes, 
 and the bounding boxes of the edge endpoints are expanded by their local r.o.c.
 
 - whenever we try to add a new set of vertices at an existing vertex (and the corresponding new mesh edges), we make the expanded new edges, and check
 for overlap of each new expanded edge with the existing front (expanded-)edge BVH.  
 
 - NB, new expanded edges will obviously hit the 2 expanded edges connected to the original vertex, as well as the adjacent ones on the front. 
 Therefore, the edges connected to the original vertex are ignored (since obviously they cannot truly collide due to the construction of the fan), 
 while the adjacent ones are still tested for exact overlap but not expanded - 
 (since they may pass through the new edges, in which case the new edges are invalid).
 
 - (though first, check whether the candidate vertex is outside the bounding box. If so, don't add it, and mark the vertex
 as a boundary vertex, which is subsequently skipped (during the growing phase).
 
 - if we get overlap, do the narrowphase by getting the closest points on the two unexpanded edges, then 
 we consider it a hit if the distance between the points is less than the sum of the r.o.c at the points
 (where the r.o.c. at the points is just obtained by linear interpolation of the r.o.c at the endpoint vertices).
 
 - we add as many points as we can until either overlap prevents adding any more. The remaining front points are obviously on the crack.
 
 - routines required:
 
 float rocAtPoint();
 float openingAngleAtFrontVertex()
 
 
 
 CRACK FILLING PHASE
 
 - then we have to paste the cracks.  This is done in O(n^2) [where n is number of vertices in front], as follows:
 
 - //first build a BVH of the bboxes of all the edges on the front (maybe later - initially use O'Rourke's brute force version => O(n^2)).
 - then do a form of ear-clipping based triangulation:
 
 - apply O'Rourke's 2d triangulation routines by setting the z-axis to the local normal, which we assume not
 to vary significantly on the scale of triangle. 
 
 - n = number of vertices in front
 - first flag whether each vertex is an ear tip (i.e. v(i-1)<->v(i+1) is diagonal - [using BVH (expanded?)] and edge-edge narrowphase)
 
 while n>3:
 
 find a vertex which is flagged as an ear, vi
 add a new triangle by creating the diagonal v(i-1)<->v(i+1)
 delete vi from the front
 recompute the ear tip status of v(i-1), v(i+1)
 
 - after this phase the mesh should be complete.
 
 - routines required:
 
 bool isDiagonal(A, B)
 
 
 */





#endif


