#include <GL/glew.h>		//Handles extension loading for Windows
#include <SFML/Window.hpp>	//Cross platform Windowing
#include <fstream>
#include <vector>
#include <set>
//#include <SFML/Graphics/Image.hpp>
#include <iostream>			//Cout for error messages
#include "Mesh.h"			//Mesh Class
#include "Shader.h"			//Shader code
#include "ShaderProgram.h"	
#include "Camera.h"

#include "ppmesh.hh"		//Chengwei's progressive mesh headers
#include "gfmesh.hh"

#define BASEMESHSPLIT 5		//Number of times we want to split base progressive mesh for all angles.
							//We do this because the base mesh loaded is too low poly and does not represent the
							//actual mesh well enough.
#define DETAILMESHSPLIT 12	//How many levels to split before generating the normal map. This value is inclusive of BASEMESHSPLIT
							//and should always be more than BASEMESHSPLIT.

//Window width/height
#define WIDTH 800
#define HEIGHT 600

int mode = 0;				//0 = demo mode (static PLY mesh), 1 = progressive-mesh mode (gmesh/pMesh)

//Using SFML library for windowing/input OS specific operations
sf::Window canvas;			//Our window object
Mesh* mesh;					//The static demo mesh we are drawing (loaded from PLY)
Mesh* pMesh;				//Mesh derived from the progressive mesh (rebuilt after every split pass)

///Shaders
Shader* nmVs;
Shader* nmFs;
ShaderProgram* nmGen;			//nmGen generates the screen space normal map into a FBO(Frame Buffer Object)
Shader* tmVs;
Shader* tmFs;
ShaderProgram* tmProg;			//tmProg is the standard texture mapping shader

//Slight modification to discard texturing if UV coordinate is invalid(backfacing)
//Used to draw low res bunny with uv generated from nmGen pass.
Shader* tmVs2;
Shader* tmFs2;
ShaderProgram* tmProg2;			//tmProg2 is a modified tmProg which checks the generated UV Coords 
//to determine if it should be textured with normal data or be colored
//using standard lighting equations

Shader* vVs;
Shader* vFs;
ShaderProgram* vProg;			//vProg is a very simple vertex only shader. Only outputs raw vertices to screen
//usually used to debug

///Camera and Lights
Camera cam;
glm::vec4 lightPos;

///Matrices
glm::mat4 modelView, modelT, modelR; //modelT/R are used in calculation of Model Matrix
glm::mat3 normalMatrix;				 //Used to transform object normals to eyespace normals (Inverse-Transpose of ModelView)

float rotate;						//Mesh rotation around Y-axis, in degrees

///Framebuffer stuff
unsigned int FBOid;					//GL id to hold FBO
unsigned int FBOtex;				//FBO color-attachment texture id (the rendered normal map)
unsigned int RBDepthid;				//Renderbuffer for depthbuffer

///Transform Feedback stuff
unsigned int tfmQuery, tfmQuery2;	//Queries: primitives written / primitives generated
unsigned int uvBuffer;				//GL Buffer to store generated UV Coords (3 floats per vertex: u, v, eyespace normal.z)
std::vector<float> uvData;			//CPU Buffer readback stored here
std::vector<float> uvDataIn;		//Backup of uvData taken after the base-mesh splits
std::set<unsigned int> mappedIDs;	//This stores the list of vertex IDS which we have normal map information for

std::map<VertexID, BitString> pmDic;	//A map to store and link vertex IDs with their respective Bitstrings

bool newUV = true;					//Are there any newUVs to read back?
bool init = false;					//Is PM Mode being called for first time?
Gfmesh* gmesh;						//The progressive mesh 
Gfmesh* gmeshBKUP;					//A backup of the progressive mesh split to BASEMESHSPLIT levels which we can reuse
									//everytime the normal map needs to be regenerated instead of loading the entire progressive
									//mesh again
//sf::Image texture;

//Forward declarations
void display(bool doUV = true, bool drawFBO = true);
void getNewUV();
void splitFrontFaces(bool splitAll = false);

//Handles keyboard input and mode changes
void handleInput()
{	
	sf::Event Event;
	while (canvas.GetEvent(Event))
	{		
		if (Event.Type == sf::Event::Closed)
		{//Window closed
			canvas.Close();
		}
		else if (Event.Type == sf::Event::KeyPressed)
		{
			switch(Event.Key.Code)
			{//Escape key pressed
			case sf::Key::Escape:
				canvas.Close();
				break;
				//Generate a new normal map and new UV Coordinates for the mesh
			case sf::Key::Return:
				display();				
				break;
			case sf::Key::F1://Demo mode
				mode = 0;
				break;
			case sf::Key::F2://Progressive Mesh mode
				mode = 1;
				cam.pos = glm::vec3(0, 0.05f, 0.7f);		//Reset camera position
				if(!init)
				{//Only done once
					//Split progressive mesh to BASEMESHSPLIT levels
					for(int i = 0; i < BASEMESHSPLIT; ++i) //not sure if gfmesh has any api to split all the way down in 1 call
					{//NOTE: There is probably a better/faster way to do this then to (split->readback->split) X5
						display(true, false);	//Run first pass to determine front facing vertices					
						splitFrontFaces( true );//we want to split whole mesh down to get a decent base mesh,
					}	
					display(true, false);		//call display one more time to read back UVBuffer
					uvDataIn.insert(uvDataIn.begin(), uvData.begin(), uvData.end());	//Make a backup copy of our base UVBuffer
					//We need to go through this initial base mesh and determine which vertices we have normalmap info for
					for(int i = 0; i < uvDataIn.size(); i+=3)
					{
						if( uvDataIn[i+2] > 0 )//If this vertex was facing front, we must be having its normal in the map
						{
							mappedIDs.insert( gmesh->index2id(i/3) );
						}
					}
					//Make a backup of our "Base" mesh
					gmeshBKUP = new Gfmesh(*gmesh);
					gmeshBKUP->update();
					init = true;		//Init is done, don't need to call it again, EVER

				}
				//Now split the front facing vertices to DETAILMESHSPLIT levels to generate the normal map
				for(int i = BASEMESHSPLIT; i < DETAILMESHSPLIT; ++i) //not sure if gfmesh has any api to split all the way down in 1 call
				{//NOTE: There is probably a better/faster way to do this then to (split->readback->split) X5
					display(true, false);	//Run first pass to determine front facing vertices					
					splitFrontFaces();		//Then split further down for front facing vertices
				}					
				//By this point, we should have a dense frontal mesh with a decent structure for the back
				display();//Display again to generate normal map this time

				//delete our hybrid high poly meshes
				delete pMesh;
				delete gmesh;
				//Create a new mesh from backed up "base" mesh
				gmesh = new Gfmesh( *gmeshBKUP );
				pMesh = new Mesh( gmeshBKUP );	
				display(true, false);	//render again to get base mesh UVs(for the low poly version) but do not generate normal map again(we already have a high res normal map)
				std::cout<<"Rendering Base Mesh with "<<pMesh->numVertices<<" vertices.\n";
				
				break;
			}

		}
	}
	///Do keyboard movement here
	if(canvas.GetInput().IsKeyDown(sf::Key::Left))
	{
		cam.setCameradX(-0.01f);
	}
	if(canvas.GetInput().IsKeyDown(sf::Key::Right))
	{
		cam.setCameradX(0.01f);
	}
	if(canvas.GetInput().IsKeyDown(sf::Key::Down))
	{
		cam.setCameraZoomd(0.01f);
	}
	if(canvas.GetInput().IsKeyDown(sf::Key::Up))
	{
		cam.setCameraZoomd(-0.01f);
	}
	if(canvas.GetInput().IsKeyDown(sf::Key::LControl))
	{//Control to rotate mesh
		rotate += 0.5f;
		//TODO:
		//1)read back UVBuffer to know which are vertices facing front 
		//2)Check every 3rd float to see which vertices are facing camera
		//3)For every vertex facing front, get its vertex id gmesh->index2id()
		//4) Check the mappedIDs to see if the vertex already has normalmap generated
		//5) else split the vertex to the maximum level
	}
	if(canvas.GetInput().IsKeyDown(sf::Key::PageDown))
	{//Control to move camera down
		cam.setCameradY(-0.01f);
	}
	else if(canvas.GetInput().IsKeyDown(sf::Key::PageUp))
	{//Control to move camera up
		cam.setCameradY(0.01f);
	}

}
///Initialise OpenGL extensions and default render states.
///Reports GLEW/GL version support, sets viewport/clear/depth state,
///positions the camera and light, and dumps the supported extension
///list to "GLExtensions.txt" for reference.
void initGL()
{
	GLenum err = glewInit();//Initialise Glew to handle extensions
	if (GLEW_OK != err)
	{
		std::cout<<"Error, could not init GLEW\n";
	}
	if(GLEW_VERSION_3_2)
	{
		std::cout<<"OpenGL 3.2 supported!\n";
	}	
	else
	{
		std::cout<<"OpenGL 3.2 not supported! Please update your graphic drivers.\n";		
	}
	if(GLEW_EXT_transform_feedback)
	{
		std::cout<<"Transform Feedback EXT supported!\n";
	}
	std::cout<<"OpenGL Renderer : "<<glGetString(GL_RENDERER)<<"\n";
	std::cout<<"OpenGL Version : "<<glGetString(GL_VERSION)<<"\n";

	glViewport(0, 0, WIDTH, HEIGHT);		// Set the viewport
	glClearColor(0.5f,0.5f,0.5f,1.0f);		//grey background
	glEnable(GL_DEPTH_TEST);				//Enable Depth Testing
	//glDisable(GL_CULL_FACE);				//Backface culling

	cam.pos = glm::vec3(0, 0.05f, 0.7f);		//Set camera position
	lightPos = glm::vec4(0,0.0f,1.0f,1.0f);	//Light shining from camera into screen

	modelView = glm::mat4(1.0f);			//Set modelview Matrix
	rotate = 0;								//Set model rotation angle to 0

	///Dump GL Extensions to file (guard against the file failing to open,
	///e.g. read-only working directory)
	std::ofstream fout("GLExtensions.txt");
	if(fout.is_open())
	{
		int numExt = 0;
		glGetIntegerv(GL_NUM_EXTENSIONS,&numExt);
		for(int i = 0; i < numExt; ++i)
		{
			fout<<glGetStringi(GL_EXTENSIONS,i)<<" ";
		}
		fout.close();
	}
	else
	{
		std::cout<<"Warning: could not write GLExtensions.txt\n";
	}
}

///Loads the progressive mesh from disk and builds the initial Mesh from it.
///Loading is slow-ish (~6s in Release); possible optimisations:
//1) Use memory mapped files
//2) use another thread and load in background
//3) Load entire file into user-allocated memory and then read from there. (This is still slower than 1)
void progressiveMeshInit()
{
	std::ifstream fin("models/happy2.ppm", std::ios::binary);
	if(!fin.is_open())
	{//Fail loudly instead of feeding a bad stream to the Gfmesh constructor
		std::cout<<"Error: could not open models/happy2.ppm\n";
		return;
	}
	gmesh = new Gfmesh(fin);
	int count = gmesh->n_detail_vertices();	//Number of split records stored in the file

	VertexID    id;
	unsigned int len;
	std::cout<<"Loading Progressive Mesh\n";
	//Each record is: vertex id, bitstring length, bitstring payload.
	//We cache every record in pmDic so vertices can be split on demand later.
	for (int i = 0; i < count; ++i)
	{
		BitString data;
		fin.read((char *)&id, sizeof(id));
		fin.read((char *)&len, sizeof(len));
		data.read_binary(fin, len);
		pmDic[id] = data;
	}
	std::cout<<"\rDone!\n";
	gmesh->update();			//generate triangle info in progressive mesh
	pMesh = new Mesh(gmesh);	//copy over triangle info into Mesh
}

///Loads all CPU/GPU resources: meshes, transform-feedback buffer/queries,
///shader programs, and the FBO used to render the normal map offscreen.
///Must be called after initGL() (needs a live GL context and GLEW).
void loadResources()
{
	progressiveMeshInit();
	mesh = new Mesh();
	mesh->loadPlyFile("models\\HappyBuddha.ply");			//Load our mesh file
	//UV buffer
	glGenQueries(1, &tfmQuery);								//Generate 2 query requests for transform feedback phase
	glGenQueries(1, &tfmQuery2);
	glGenBuffers(1, &uvBuffer);								//Generate a Buffer to store our transform feedback results
	glBindBuffer(GL_ARRAY_BUFFER, uvBuffer);				//Bind the buffer and allocate memory for the buffer
	glBufferData(GL_ARRAY_BUFFER, 3*sizeof(float)*mesh->numVertices, 0, GL_STREAM_DRAW);//3 floats per vertex: u, v, eyespace normal.z

	//Generate shaders, compile and link them into a shader program
	//Shader description is documented at the top of this file at declaration
	nmVs = new Shader();
	nmVs->loadFromFile("shaders\\default.vert", GL_VERTEX_SHADER);
	nmFs = new Shader();
	nmFs->loadFromFile("shaders\\default.frag", GL_FRAGMENT_SHADER);
	nmGen = new ShaderProgram(nmVs, nmFs);
	nmGen->addAttribute("vPos");
	nmGen->addAttribute("vNor");
	//nmGen->addAttribute("vUV");
	const char *vars[] = {"uvPos"};		//Define which varying variables we want to extract from transform feedback phase
	glTransformFeedbackVaryings(nmGen->getID(), 1, vars, GL_SEPARATE_ATTRIBS);	//Must be set BEFORE linking
	nmGen->Link();

	tmVs = new Shader();
	tmVs->loadFromFile("shaders\\textureMap.vert", GL_VERTEX_SHADER);
	tmFs = new Shader();
	tmFs->loadFromFile("shaders\\textureMap.frag", GL_FRAGMENT_SHADER);
	tmProg = new ShaderProgram(tmVs, tmFs);
	tmProg->addAttribute("vPos");
	tmProg->addAttribute("vUV");
	tmProg->Link();

	tmVs2 = new Shader();
	tmVs2->loadFromFile("shaders\\textureMap2.vert", GL_VERTEX_SHADER);
	tmFs2 = new Shader();
	tmFs2->loadFromFile("shaders\\textureMap2.frag", GL_FRAGMENT_SHADER);
	tmProg2 = new ShaderProgram(tmVs2, tmFs2);
	tmProg2->addAttribute("vPos");
	tmProg2->addAttribute("vNor");
	tmProg2->addAttribute("vUV");
	tmProg2->Link();

	vVs = new Shader();
	vVs->loadFromFile("shaders\\Vertex.vert", GL_VERTEX_SHADER);
	vFs = new Shader();
	vFs->loadFromFile("shaders\\Vertex.frag", GL_FRAGMENT_SHADER);
	vProg = new ShaderProgram(vVs, vFs);
	vProg->addAttribute("vPos");
	vProg->addAttribute("vNor");
	vProg->Link();

	///Framebuffer loading
	//The framebuffer is synonymous to Render 2 texture in DirectX but with more flexibility
	//We need this to generate the normal map offscreen
	//Generate FBO
	glGenFramebuffers(1, &FBOid);
	glBindFramebuffer(GL_FRAMEBUFFER, FBOid);
	//Framebuffer needs atleast a color and depth buffer, we use a texture for the color part
	glGenTextures(1, &FBOtex);
	glBindTexture(GL_TEXTURE_2D, FBOtex );
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 1024, 
		1024, 0, GL_RGB, GL_UNSIGNED_BYTE, 0);//set size to 1024X1024 but submit no data(data is generated from shader)
	
	//The color information will be rendered to an actual OpenGL texture instead of a color buffer
	//Because we intend to use the rendered normal map as an texture later on
	glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, FBOtex, 0);	//bind texture to FBO
	if(glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
	{
		std::cout<<"FrameBuffer Object Creation Error.\n";

	}


	//Gen and add Depth Renderbuffer
	//We need a depth buffer to prevent z ordering problems even in offscreen rendering
	glGenRenderbuffers(1, &RBDepthid);
	glBindRenderbuffer(GL_RENDERBUFFER, RBDepthid);
	glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, 1024, 1024);
	glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, RBDepthid);//Bind depth buffer to FBO
	GLenum stat = glCheckFramebufferStatus(GL_FRAMEBUFFER);
	std::cout<<stat<<" "<<GL_FRAMEBUFFER_COMPLETE<<"\n";	//Print status vs the expected COMPLETE value
	glClear(GL_COLOR_BUFFER_BIT);
	glBindFramebuffer(GL_FRAMEBUFFER, 0);



}

///Renders a Mesh with whatever shader program is currently bound.
///When `points` is true, only raw vertices are emitted as GL_POINTS with no
///index buffer — this is the mode used by the transform-feedback UV pass.
void drawMesh(Mesh *m, bool points = false)
{
	//Interleaved vertex layout: 12B position [+ 12B normal [+ 8B uv]]
	int byteStride = 0;
	if(m->hasNormals)
		byteStride = m->hasTexture ? 32 : 24;

	glBindBuffer(GL_ARRAY_BUFFER, m->vboID);					//Vertex buffer always needed
	if(!points)
		glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, m->indexID);		//Index buffer only for indexed triangle draws

	//Attribute 0: position (always present)
	glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, byteStride, 0);
	glEnableVertexAttribArray(0);
	if(m->hasNormals)
	{	//Attribute 1: normal, offset past the 3 position floats
		glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, byteStride, (char*)(3*sizeof(float)));
		glEnableVertexAttribArray(1);
		if(m->hasTexture)
		{	//Attribute 2: uv, offset past position + normal
			glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, byteStride, (char*)(6*sizeof(float)));
			glEnableVertexAttribArray(2);
		}
	}

	if(points)
		glDrawArrays(GL_POINTS, 0, m->numVertices);							//Raw vertices, no connectivity
	else
		glDrawElements(GL_TRIANGLES, m->index.size(), GL_UNSIGNED_INT, NULL);	//Indexed triangles

	//Restore bindings so later passes start from a clean state
	glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
	glBindBuffer(GL_ARRAY_BUFFER, 0);
	glBindTexture(GL_TEXTURE_2D, 0);
	//Attribute 2 must not leak into the UV-generation pass, which uses only two attributes
	glDisableVertexAttribArray(2);
}

///This function does 2 passes of the geometry.
///1St pass(doUV) is points only where the new UV coordinates are generated and stored in FBOid buffer.
///The visual output is discarded
///2nd Pass(drawFBO) is to generate the normal map from the current camera viewpoint
///The output of both these passes are used together (UVCoordinates and normalmap) to map the hybrid(low & high res mesh)
/// in display2() which is called every frame.
///Runs up to two offline passes over the geometry (see comment block above).
///@param doUV     pass 1: points-only transform-feedback pass that writes
///                (u, v, eyespace normal.z) per vertex into uvBuffer, then reads it back.
///@param drawFBO  pass 2: renders the mesh into the FBO to produce the normal map texture.
void display(bool doUV, bool drawFBO)
{
	if(doUV)
	{//we want to process UV Coordinates and read them back
		//Reallocate buffer and discard previous buffer(usually mesh size has changed when display is called)
		glBindBuffer(GL_ARRAY_BUFFER, uvBuffer);		
		if(mode == 0)
		{
			glBufferData(GL_ARRAY_BUFFER, 3*sizeof(float)*mesh->numVertices, 0, GL_STREAM_DRAW);	//Allocate enough memory to hold UVZ data
		}
		else
		{
			glBufferData(GL_ARRAY_BUFFER, 3*sizeof(float)*pMesh->numVertices, 0, GL_STREAM_DRAW);	//Allocate enough memory to hold UVZ data
		}
		glBindBuffer(GL_ARRAY_BUFFER, 0);

		glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, uvBuffer);				//Bind Transform feedback buffer

		nmGen->Use();								//Bind our normalmap generator shader

		////
		////Draw Mesh into FBO
		////
		modelT = glm::translate<float>( glm::mat4(1.0f), glm::vec3(0.05f, -0.1f, 0) );	//mesh position
		modelR = glm::rotate<float>(modelT,rotate, glm::vec3(0 ,1.0f, 0) );		//mesh rotation
		modelView = cam.getViewMatrix() * modelR ;								//Form modelview matrix
		normalMatrix = glm::mat3(glm::transpose(glm::inverse(modelView)));		//Calculate normal matrix
		//Set uniforms
		glUniformMatrix4fv(nmGen->getUniformLocation("modelView"), 1, GL_FALSE, glm::value_ptr(modelView) );
		glUniformMatrix4fv(nmGen->getUniformLocation("perspective"), 1, GL_FALSE, glm::value_ptr(cam.getPerspMatrix()) );//Once per frame
		glUniformMatrix3fv(nmGen->getUniformLocation("normalMatrix"), 1, GL_FALSE, glm::value_ptr(normalMatrix) );
		glUniform4fv(nmGen->getUniformLocation("lightPos"), 1, glm::value_ptr(lightPos));


		glViewport(0,0,1024, 1024);				//Set viewport to texture size

		glBeginQuery(GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN ,tfmQuery);			//Begin Xfm feedback query
		glBeginQuery(GL_PRIMITIVES_GENERATED ,tfmQuery2);							//Begin Xfm feedback query
		glBeginTransformFeedback(GL_POINTS);										//Begin actual Xfm Feedback	
		glEnable(GL_RASTERIZER_DISCARD);											//We only want the feedback data, not pixels
		if(mode == 0)
		{
			drawMesh(mesh, true);
		}
		else
		{
			drawMesh(pMesh,true);
		}

		glDisable(GL_RASTERIZER_DISCARD);											//Enable Drawing
		glEndTransformFeedback();													//Stop Xfm Feedback
		glEndQuery(GL_PRIMITIVES_GENERATED);										//End Xfm feedback Query	
		glEndQuery(GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN);						//End Xfm feedback Query
		if( glGetError() != GL_NO_ERROR )
		{			//Check for any OpenGL errors last frame
			std::cout<<"OpenGL error occurred\n";
		}
		glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, 0);
	}
	if(drawFBO)
	{
		glBindFramebuffer(GL_FRAMEBUFFER, FBOid);	//Bind our framebuffer object to render to
		glViewport(0,0,1024, 1024);					//match texture size
		glClearColor(0.0f,0.0f,0.0f,1.0f);			//black background
		glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );	//Clear Texture

		if(mode == 0)
		{
			drawMesh(mesh);	//Draw into FBO
		}
		else
		{
			drawMesh(pMesh);
		}
		glBindFramebuffer(GL_FRAMEBUFFER, 0);		//unbind FBO
		if( glGetError() != GL_NO_ERROR )
		{			//Check for any OpenGL errors last frame
			std::cout<<"OpenGL error occurred\n";
		}
		newUV = true;	
	}
	if(doUV)
	{//Read back the freshly generated UV data into uvData
		getNewUV();
	}


}



	///Retrieves the UV coordinates generated by the transform-feedback pass.
	///Results (u, v, eyespace normal.z per vertex) are copied into the uvData vector
	///and newUV is cleared. Also reports the transform-feedback query counters.
	void getNewUV()
	{
		unsigned int queryResult = 0;
		//Only report counters if the query result is actually available (non-blocking check)
		glGetQueryObjectuiv(tfmQuery, GL_QUERY_RESULT_AVAILABLE, &queryResult);		
		if(queryResult)
		{
			glGetQueryObjectuiv(tfmQuery2, GL_QUERY_RESULT, &queryResult);
			std::cout<<"Num primitives drawn "<<queryResult<<"\n";
			glGetQueryObjectuiv(tfmQuery, GL_QUERY_RESULT, &queryResult);
			std::cout<<"Num primitives written "<<queryResult<<"\n";
		}

		//Size the CPU-side buffer to match the active mesh (3 floats per vertex)
		if(mode == 0)
		{
			uvData.resize(mesh->numVertices * 3 );
		}
		else if(mode == 1)
		{
			uvData.resize(pMesh->numVertices * 3 );
		}
		if(uvData.empty())
		{//Nothing to read back; avoid undefined behaviour of &uvData[0] on an empty vector
			newUV = false;
			return;
		}
		///Bind the buffer
		glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, uvBuffer);
		///Read back Transform feedback data into uvData vector
		glGetBufferSubData(GL_TRANSFORM_FEEDBACK_BUFFER, 0, uvData.size() *  sizeof(float), uvData.data() );

		newUV = false;
	}


	//Used for normal rendering every frame
	void display2()
	{

		glClearColor(0.1f,1.0f,1.0f,1.0f);//grey background
		glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );	//Clear Screen
		glViewport(0,0,WIDTH, HEIGHT);							//back to screen size

		modelT = glm::translate<float>( glm::mat4(1.0f), glm::vec3(0.05f, -0.1f, 0) );	//mesh position
		modelR = glm::rotate<float>(modelT,rotate, glm::vec3(0 ,1.0f, 0) );		//mesh rotation
		modelView = cam.getViewMatrix() * modelR ;								//Form modelview matrix
		normalMatrix = glm::mat3(glm::transpose(glm::inverse(modelView)));		//Calculate normal matrix
		if(mode == 0)
		{//Demo mode
			tmProg2->Use();
			//Set uniforms
			glUniformMatrix4fv(tmProg2->getUniformLocation("modelView"), 1, GL_FALSE, glm::value_ptr(modelView) );
			glUniformMatrix4fv(tmProg2->getUniformLocation("perspective"), 1, GL_FALSE, glm::value_ptr(cam.getPerspMatrix()) );//Once per frame
			glUniformMatrix3fv(tmProg2->getUniformLocation("normalMatrix"), 1, GL_FALSE, glm::value_ptr(normalMatrix) );
			//drawMesh(mesh);//Cant use drawMesh here, we want to use Xfm_Feedback buffer as UV so we have to manually do it
			glBindBuffer(GL_ARRAY_BUFFER, mesh->vboID);						//Bind Vertex Buffer
			glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mesh->indexID);				//Bind Index Buffer    
			glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 24, 0);		//Bind vertex input to shader
			glEnableVertexAttribArray(0);								//Enable Position
			glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 24, (char*)(0 + 3*sizeof(float)) );		//Bind normal input to shader
			glEnableVertexAttribArray(1);								//Enable normals
			//Skip normals
			//glUniform1i(nmtextureSamplerID, 0);

			glBindBuffer(GL_ARRAY_BUFFER, uvBuffer);					//Bind generated UVCoords Buffer

			glUniform1i(tmProg2->getUniformLocation("Sampler"), 0);		//Set uniform location for normalMap
			glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 12, 0);		//Bind UV input to shader(Using 3 components for UV, uvz) z specifies front/back facing
			glEnableVertexAttribArray(2);								//Enable UV
			glBindTexture(GL_TEXTURE_2D, FBOtex);						//Bind the generated normalMap as texture
			glDrawElements(GL_TRIANGLES,mesh->index.size(),GL_UNSIGNED_INT, NULL);	//Draw Mesh

			///Disable and unbind modes
			glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); 
			glBindBuffer(GL_ARRAY_BUFFER, 0); 
			glBindTexture(GL_TEXTURE_2D, 0 );
			glDisableVertexAttribArray(1);
			glDisableVertexAttribArray(2);
		}
		else
		{
			/*
			//Using drawMesh here because we are not submitting UVData yet because we are unable to split mesh so far
			//Once we can generate UVDataIn buffer, code here should look similar to above if block
			vProg->Use();
			glUniformMatrix4fv(vProg->getUniformLocation("modelView"), 1, GL_FALSE, glm::value_ptr(modelView) );
			glUniformMatrix4fv(vProg->getUniformLocation("perspective"), 1, GL_FALSE, glm::value_ptr(cam.getPerspMatrix()) );//Once per frame
			glUniformMatrix3fv(vProg->getUniformLocation("normalMatrix"), 1, GL_FALSE, glm::value_ptr(normalMatrix) );
			drawMesh(pMesh);
			*/

			tmProg2->Use();
			//Set uniforms
			glUniformMatrix4fv(tmProg2->getUniformLocation("modelView"), 1, GL_FALSE, glm::value_ptr(modelView) );
			glUniformMatrix4fv(tmProg2->getUniformLocation("perspective"), 1, GL_FALSE, glm::value_ptr(cam.getPerspMatrix()) );//Once per frame
			glUniformMatrix3fv(tmProg2->getUniformLocation("normalMatrix"), 1, GL_FALSE, glm::value_ptr(normalMatrix) );
			//drawMesh(mesh);//Cant use drawMesh here, we want to use Xfm_Feedback buffer as UV so we have to manually do it
			glBindBuffer(GL_ARRAY_BUFFER, pMesh->vboID);						//Bind Vertex Buffer
			glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, pMesh->indexID);				//Bind Index Buffer    
			glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 24, 0);		//Bind vertex input to shader
			glEnableVertexAttribArray(0);								//Enable Position
			glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 24, (char*)(0 + 3*sizeof(float)) );		//Bind normal input to shader
			glEnableVertexAttribArray(1);								//Enable normals
			//Skip normals
			//glUniform1i(nmtextureSamplerID, 0);

			glBindBuffer(GL_ARRAY_BUFFER, uvBuffer);					//Bind generated UVCoords Buffer

			glUniform1i(tmProg2->getUniformLocation("Sampler"), 0);		//Set uniform location for normalMap
			glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 12, 0);		//Bind UV input to shader(Using 3 components for UV, uvz) z specifies front/back facing
			glEnableVertexAttribArray(2);								//Enable UV
			glBindTexture(GL_TEXTURE_2D, FBOtex);						//Bind the generated normalMap as texture
			glDrawElements(GL_TRIANGLES,pMesh->index.size(),GL_UNSIGNED_INT, NULL);	//Draw Mesh

			///Disable and unbind modes
			glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); 
			glBindBuffer(GL_ARRAY_BUFFER, 0); 
			glBindTexture(GL_TEXTURE_2D, 0 );
			glDisableVertexAttribArray(1);
			glDisableVertexAttribArray(2);
		}
		////
		////Draw FBO as texture
		////This just draws a Quad facing the screen and maps the generated normal and draws it as color
		////
		if(canvas.GetInput().IsKeyDown(sf::Key::Space))
		{//Press Space to show FBO as a texture
			tmProg->Use();
			//draw out texture
			static float vertex[20] = {-0.25f, -0.25, 1.0f, 0.0f,0.0f, 0.25f, -0.25f, 1.0f, 1.0f,0.0f, 0.25f, 0.25f, 1.0f, 1.0f,1.0f, -0.25f, 0.25f, 1.0f, 0.0f,1.0f};
			modelView = glm::mat4(1.0f);
			glUniformMatrix4fv(tmProg->getUniformLocation("modelView"), 1, GL_FALSE, glm::value_ptr(modelView) );
			glUniformMatrix4fv(tmProg->getUniformLocation("perspective"), 1, GL_FALSE, glm::value_ptr(cam.getPerspMatrix()) );//Once per frame


			glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 20, vertex);//Bind vertex input to shader
			glEnableVertexAttribArray(0);			

			glUniform1i(tmProg->getUniformLocation("Sampler"), 0);
			glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 20, (char*)(vertex + 3));//Bind UV input to shader
			glEnableVertexAttribArray(1);		


			glBindTexture(GL_TEXTURE_2D, FBOtex);
			glDrawArrays(GL_TRIANGLE_FAN,0,4);

			glBindBuffer(GL_ARRAY_BUFFER, 0); 
			glBindTexture(GL_TEXTURE_2D, 0 );
			glDisableVertexAttribArray(1);
		}
	}


	void destroyResources()
	{
		delete gmesh;
		delete mesh;
		delete nmVs;
		delete nmFs;
		delete nmGen;
		glDeleteQueries(1, &tfmQuery);//Delete transform feedback queries

	}

	void splitFrontFaces(bool splitAll)
	{
		///Idea/Reccomendation
		//If splitting and generation of new UVBuffer and Mesh is slow, do it in another thread while displaying 
		//old mesh until it is ready, then submit new mesh to GPU on this thread.(GLCalls must all be made from main thread)
		//1)
		std::cout<<"Splitting Frontfacing Vertices\n";
		int index;
		int skipped = 0;
		for(int i = 0; i < pMesh->numVertices; ++i)
		{
			if( uvData[i*3 + 2] > 0 || splitAll)//Check screenspace normal.z of this vertex
			{//If this vertex was frontfacing and has no normal map data generated
				index = i;
				size_t pos = 0;
				VertexID ID = gmesh->index2id(index);
				if( pmDic.find(gmesh->index2id(index)) != pmDic.end() )
				{//Check if we have a valid BitString for this index
					BitString bs = pmDic[gmesh->index2id(index)];
					gmesh->decode(ID, bs, &pos);							
				}
				else
				{
					++skipped;	//Not sure why this happens -.-
				}			
			}
		}
		gmesh->update();	//Generate triangle information

		//Possible optimisation here, instead of deleting, overwrite current arrays/buffers instead
		delete pMesh;			
		pMesh = new Mesh(gmesh);
		std::cout<<"Skipped "<<skipped<<" vertices.\n";
		std::cout<<"New mesh has "<<pMesh->numVertices<<" vertices\n";

	}

	///Entry point: creates the GL 3.2 window, initialises GL state and resources,
	///then runs the render/input loop until the window is closed.
	int main()
	{
		sf::ContextSettings Settings;
		Settings.MajorVersion = 3;
		Settings.MinorVersion = 2;//Request an OpenGL 3.2 context


		//Note: SFML will continuously try to create a context from the requested version by decrementing the requested version
		//upon failure to create the requested version context.
		canvas.Create(sf::VideoMode(WIDTH, HEIGHT, 32), "High Poly Viewer", sf::Style::Close, Settings);

		canvas.UseVerticalSync(true);

		initGL();			//Initialise OpenGL
		loadResources();	//Load resources like mesh/allocate buffers/etc..
		display();			//First frame to generate FBO and UV Coords	
		while(canvas.IsOpened())
		{			
			display2();		//Draw mesh
			handleInput();	//Check for any input
			if(newUV)
			{				//If ENTER was pressed, we must have called display() in handleInput, so retrieve the results.
				getNewUV();
				if( glGetError() != GL_NO_ERROR )
				{			//Check for any OpenGL errors last frame
					std::cout<<"OpenGL error occurred\n";
				}
				if(mode == 1)
				{
					//TODO: 
					//1) go through UVData array and determine vertices which need to be split
					//UVData buffer format is as follows : UVX, UVY, eyespace normal.z
					//2) do Splitting of backfacing vertices to form a new mesh
					//3) Generate a new UVData buffer to match number of vertices of new hybrid mesh??(This could be very slow)



					//construct& fill uvDataIn based on newly generated mesh


				}
			}
			canvas.Display();	//Swap our buffers


			if( glGetError() != GL_NO_ERROR )
			{			//Check for any OpenGL errors last frame
				std::cout<<"OpenGL error occurred\n";
			}

		}
		destroyResources();		//Cleanup


		return 0;

	}
