#include <stdlib.h>
#include <direct.h>
#include <windows.h>
#include <stdlib.h>
#include <xmmintrin.h>

#include "../US2/Devkits/SDL/SDL.h"
#include "../US2/Devkits/FreeImage/FreeImage.h"

#include "../tdp/trender/bellum/tbr_fastscene.hpp"
#include "../tdp/trender/bellum/tbr_visualizer.hpp"
#include "../tdp/trender/bellum/tbr_bspvisibility.hpp"
#include "../tdp/trender/bellum/tbr_sopic.hpp"
#include "../tdp/trender/tdp_renderer.hpp"
#include "../tdp/trender/tdp_glinitializer.hpp"
#include "../tdp/trender/tdp_camera.hpp"
#include "../tdp/trender/tdp_assetmanager.hpp"
#include "../tdp/trender/tdp_materialasset.hpp"
#include "../tdp/tdp_rotation.hpp"
#include "../tdp/tdp_cfile_filesystem.hpp"

#include "../tdp/tdp_string.hpp"
#include "../tdp/tdp_trace.hpp"

extern "C"
{
#include "../tdp/trender/mesa/gl.h"
#include "../tdp/tdp_toolsio.h"
#include "../q3map/progress.h"
#include "../q3map/radiosity.h"
}

using namespace TDP;

// Classdefs
namespace TDP
{
	// No trace — the TDP library's trace hook is deliberately left unset,
	// so library-side trace output is disabled for this command-line tool.
	int (*terTraceFunction)(const terString &str) = NULL;
};

// Parameters
// (viewportSize/windowRows/windowColumns are recomputed in main() from the
// 640x480 capture window and the -res option)
int viewportSize			= 64;
int windowRows				= 8;
int windowColumns			= 14;
float samplingFOV			= 120.0f;
float nearplaneDist			= 1.0f;
float farplaneDist			= 6000.0f;
bool useSphericalAdjust		= true;

// Count of sample groups emitted so far (see EmitRadiosityInfo)
int sampleNum = 0;

// Scene management
terRenderer *rr;
R2::RenderContext *rvisualizer;
terBRFastScene *rscene;
terCamera *rcamera;
terMasterAssetManager *rassetManager;
terFileSystem *rfs;
terBRBSPVisibility *rvisibility;
terBRBSPMapAsset *rmap;
terBRBSPModel *rbspModel;
terMaterialAsset *roccluderMaterial;

Rendering::RenderSystem::UnifiedRenderer *runified;

// Files
// radPointFile: current input ".radpoints" stream, NULL between charts.
// radSampleFile: current output ".radsamples" stream, NULL between charts.
terFileStream *radPointFile = NULL;
terFileStream *radSampleFile = NULL;
terString mapBasePath;

rradProjectSettings_t projSettings;
// Per-chart sample counts, filled in as input charts are opened in ComputeRadiosity
terVector<unsigned int> chartSampleCounts;

// Output chart cursor: -1 until the first output chart is started
int outputChartIndex = -1;
unsigned int outputSampleIndex;


// Influence mask
// (per-pixel weights/directions built by ComputeInfluenceMask)
terVector<terReal> influenceMask;
terVector<terVec4, 16> incomingAngle;
terVector<terVec4, 16> normalizedIncomingAngle;
terVector<terVec4, 16> sampleAngleStorage;

// Precomputed per-pixel SH projection weights for the three tetrahedron-aligned
// views, plus a "flat" variant; built by ComputeTetraAdjusts
terVector<terVec4, 16> tetraSHCoefs[3];
terVector<terVec4, 16> flatSHCoefs[3];
float tetraTotal;

// Camera matrices for tiles
cameraMatrices;	// NOTE(review): see declaration below — per-tile camera orientation
terVector<terMat3x3> cameraMatrices;
terVector<bool> flatFlags;

// OpenGL stuff
HINSTANCE glLibInstance;


// Bridges the engine's GL bootstrap to SDL: the renderer asks this object for
// GL entry points, which are resolved through SDL's GL loader.
class rradGLInitializer : public terGLInitializer
{
public:
	// Resolve a GL function by name via SDL (returns NULL if unavailable).
	void *GetProcAddress(const char *str) const
	{
		return SDL_GL_GetProcAddress(str);
	}
};

// Prints command-line usage to stderr and terminates the process.
// The option list mirrors the flags actually parsed in main():
// -res, -fov, -near, -far, -nosphere (the old text advertised a
// nonexistent "-backplane" option and omitted the others).
int usage()
{
	terFPrintf(stderr, "Usage: rrad <regen level> <RSP prefix> <BSP/RAD prefix> [options]\n");
	terFPrintf(stderr, "   -res <size>         Change viewport size (default 64)\n");
	terFPrintf(stderr, "   -fov <degrees>      Change sampling FOV (default 120)\n");
	terFPrintf(stderr, "   -near <dist>        Change near plane distance (default 1)\n");
	terFPrintf(stderr, "   -far <dist>         Change far plane distance (default 6000)\n");
	terFPrintf(stderr, "   -nosphere           Disable spherical projection adjustment\n");
	exit(-1);
}

// Projects a (unit) direction vector into the 9 second-order SH basis
// coefficients, scaled by the Lambert diffuse convolution factors.
//
// dir - incoming light direction (engine convention: x/y/z indexable)
// out - receives the 9 coefficients in band order (1 DC, 3 linear, 5 quadratic)
void LambertDirectionToSHVector(const terVec3 &dir, float *out)
{
	const float x = dir[0];
	const float y = dir[1];
	const float z = dir[2];

	// Band 0 (constant)
	out[0] = 1.0f * SH_AMBIENT_FACTOR;

	// Band 1 (linear) — note the y/z/x channel ordering
	out[1] = y * SH_LINEAR_FACTOR;
	out[2] = z * SH_LINEAR_FACTOR;
	out[3] = x * SH_LINEAR_FACTOR;

	// Band 2 (quadratic)
	out[4] = ( x*y ) * 3.0f*SH_QUADRATIC_FACTOR;
	out[5] = ( y*z ) * 3.0f*SH_QUADRATIC_FACTOR;
	out[6] = ( 1.5f*( z*z ) - 0.5f ) * SH_QUADRATIC_FACTOR;
	out[7] = ( x*z ) * 3.0f*SH_QUADRATIC_FACTOR;
	out[8] = 0.5f*( x*x - y*y ) * 3.0f*SH_QUADRATIC_FACTOR;
}

// Diagnostic counters: points read from .radpoints vs. samples written out.
int samplesIn = 0;
int samplesOut = 0;

// Accumulates a luminance-weighted sum of per-pixel incoming directions over a
// count x count pixel tile, adding the result into *primaryDirection (SSE).
//
// primaryDirection - in/out accumulator; assumed 16-byte aligned (_mm_load_ps/_mm_store_ps)
// rowBytes         - RGB8 pixel data for the tile's first row
// sampleAngle      - per-pixel direction table (one terVec4 per pixel, row-major);
//                    assumed 16-byte aligned — TODO confirm terVec4's float* conversion
// count            - tile side length in pixels
// stride           - bytes per full framebuffer row (the tile is packed inside it)
void AccumulateDirectionFast(terVec4 *primaryDirection, const unsigned char *rowBytes, const terVec4 *sampleAngle, int count, int stride)
{
	__m128 accum;


	accum = _mm_setzero_ps();

	for(int i=0;i<count;i++)
	{
		for(int j=0;j<count;j++)
		{
			// Weight = unnormalized luminance (R+G+B) of this pixel, splatted to all lanes
			__m128 converted = _mm_set_ps1((float)(rowBytes[0] + rowBytes[1] + rowBytes[2]));

			accum = _mm_add_ps(accum, _mm_mul_ps(converted, _mm_load_ps(*sampleAngle)));
			rowBytes += 3;
			sampleAngle++;
		}
		// Skip the remainder of the framebuffer row past this tile
		rowBytes += stride - count*3;
	}

	_mm_store_ps(*primaryDirection, _mm_add_ps(accum, _mm_load_ps(*primaryDirection)));
}


// Splits the light captured in a count x count tile into an ambient term and a
// directed term, based on how well each pixel's incoming direction agrees with
// the precomputed primary direction (SSE version of the commented-out scalar
// loop in EmitSingleTileInfo).
//
// ambientOut/directedOut - receive the two accumulated RGB sums (16-byte aligned)
// maskPtr                - per-pixel influence weights (see ComputeInfluenceMask)
// directionPtr           - per-pixel normalized incoming directions
// primaryDirection       - dominant light direction for this tile (unit length)
// rowBytes               - RGB8 pixel data; stride is the full framebuffer row pitch
void SplitLightFast(terVec4 *ambientOut, terVec4 *directedOut, const float *maskPtr, const terVec4 *directionPtr, const terVec4 &primaryDirection, const unsigned char *rowBytes, int count, int stride)
{
	__m128 ambient, directed;

	ambient = _mm_setzero_ps();
	directed = _mm_setzero_ps();

	__m128 pd128 = _mm_load_ps(primaryDirection);

	for(int i=0;i<count;i++)
	{
		for(int j=0;j<count;j++)
		{
			__m128 converted = _mm_set_ps(0.0f, rowBytes[2], rowBytes[1], rowBytes[0]);

			__m128 sampleLight = _mm_mul_ps(converted, _mm_set_ps1(*maskPtr));

			// Figure out what fraction of this is directional
			__m128 multipliedAngle = _mm_mul_ps(_mm_load_ps(*directionPtr), pd128);

			// Horizontal add of xyz lanes = dot(sample direction, primary direction)
			//fraction = (*maskPtr).DotProduct(primaryDirection);
			float fraction = multipliedAngle.m128_f32[0] + multipliedAngle.m128_f32[1] + multipliedAngle.m128_f32[2];

			// Clamp: pixels facing away contribute only to ambient
			if(fraction < 0.0f)
				fraction = 0.0f;

			// Split into ambient and directed components
			//ambient += sampleLight * (1.0 - fraction);
			ambient = _mm_add_ps(ambient, _mm_mul_ps(sampleLight, _mm_set_ps1(1.0f - fraction)));
			//directedLight += sampleLight * fraction;
			directed = _mm_add_ps(directed, _mm_mul_ps(sampleLight, _mm_set_ps1(fraction)));

			rowBytes += 3;
			maskPtr++;
			directionPtr++;
		}
		// Skip the remainder of the framebuffer row past this tile
		rowBytes += stride - count*3;
	}

	_mm_store_ps(*ambientOut, ambient);
	_mm_store_ps(*directedOut, directed);
}


// Resets a packed 9-coefficient SH accumulator (three __m128 registers)
// to all zeros prior to accumulation.
void InitializeSHCoefs(__m128 baseCoefs[3])
{
	const __m128 zero = _mm_setzero_ps();
	for(int i = 0; i < 3; i++)
		baseCoefs[i] = zero;
}


// Accumulates one color channel of a count x count sub-tile into a packed SH
// coefficient accumulator, using the precomputed per-pixel basis weights from
// tetraSHCoefs (or flatSHCoefs when the sample is flagged flat).
//
// bytes     - pointer to the channel's first byte (caller offsets +0/+1/+2 for R/G/B)
// baseCoefs - in/out packed SH accumulator (3 x __m128 = up to 12 floats, 9 used)
// count     - sub-tile side length in pixels
// stride    - bytes per full framebuffer row
// side      - tetrahedron side index (0..2), selects the weight table
// flat      - use the flat (single-direction) weight table instead
//
// NOTE(review): count/stride are unsigned but compared against signed loop
// indices — fine for realistic sizes, but generates sign-compare warnings.
void AccumulateSHCoefsFast(const unsigned char *bytes, __m128 baseCoefs[3], unsigned int count, unsigned int stride, unsigned int side, bool flat)
{
	const float *shCoefs;
	__m128 coefs[3];

	if(flat)
		shCoefs = flatSHCoefs[side].Recast<const float>();
	else
		shCoefs = tetraSHCoefs[side].Recast<const float>();

	// Work in locals so the compiler can keep the accumulator in registers
	for(int i=0;i<3;i++)
		coefs[i] = baseCoefs[i];

	for(int i=0;i<count;i++)
	{
		for(int j=0;j<count;j++)
		{
			if(j < i)
			{
				// Below the tetrahedron lower bounds
				shCoefs += 12;
				bytes += 3;
				continue;
			}

			// Splat this pixel's channel value across all lanes
			__m128 v = _mm_set_ps1((float)(*bytes));

			// Multiply-accumulate the 12 packed weights (9 SH coefs + padding)
			for(int subsection=0;subsection<3;subsection++)
			{
				__m128 coefMults = _mm_load_ps(shCoefs);
				coefs[subsection] = _mm_add_ps(coefs[subsection], _mm_mul_ps(coefMults, v));
				shCoefs += 4;
			}
			bytes += 3;
		}
		bytes += stride - count*3;
	}

	for(int i=0;i<3;i++)
		baseCoefs[i] = coefs[i];
}

// Rotates a set of 9 second-order SH coefficients from camera space into world
// space using the 3x3 rotation matrix rMat.
//
// outCoefs - receives the 9 rotated coefficients (band order: DC, linear, quadratic)
// inCoefs  - packed source coefficients; coefficient i lives at
//            inCoefs[i/4].m128_f32[i%4] (MSVC union field)
// rMat     - rotation matrix to apply
//
// (The previous version computed an unused `sqrt3` local every call; removed.)
void RotateCoefsByMatrix(float outCoefs[9], const __m128 inCoefs[3], const terMat3x3 &rMat)
{
	float pIn[9];

	// Unpack the SSE accumulator into a flat array
	for(int i=0;i<9;i++)
		pIn[i] = inCoefs[i/4].m128_f32[i%4];

    // DC — invariant under rotation
    outCoefs[0] = pIn[0];

    // Linear — rotates like a vector (note the y/z/x storage order)
    outCoefs[1] = rMat[1][0]*pIn[3] + rMat[1][1]*pIn[1] + rMat[1][2]*pIn[2];
    outCoefs[2] = rMat[2][0]*pIn[3] + rMat[2][1]*pIn[1] + rMat[2][2]*pIn[2];
    outCoefs[3] = rMat[0][0]*pIn[3] + rMat[0][1]*pIn[1] + rMat[0][2]*pIn[2];

    // Quadratics — each output is a bilinear form in the matrix rows
    outCoefs[4] = (
          ( rMat[0][0]*rMat[1][1] + rMat[0][1]*rMat[1][0] ) * ( pIn[4] )
        + ( rMat[0][1]*rMat[1][2] + rMat[0][2]*rMat[1][1] ) * ( pIn[5] )
        + ( rMat[0][2]*rMat[1][0] + rMat[0][0]*rMat[1][2] ) * ( pIn[7] )
        + ( rMat[0][0]*rMat[1][0] ) * ( pIn[8] )
        + ( rMat[0][1]*rMat[1][1] ) * ( -pIn[8] )
        + ( rMat[0][2]*rMat[1][2] ) * ( 3.0f*pIn[6] )
        );

    outCoefs[5] =  (
		   ( rMat[1][0]*rMat[2][1] + rMat[1][1]*rMat[2][0] ) * ( pIn[4] )
         + ( rMat[1][1]*rMat[2][2] + rMat[1][2]*rMat[2][1] ) * ( pIn[5] )
         + ( rMat[1][2]*rMat[2][0] + rMat[1][0]*rMat[2][2] ) * ( pIn[7] )
         + ( rMat[1][0]*rMat[2][0] ) * ( pIn[8] )
         + ( rMat[1][1]*rMat[2][1] ) * ( -pIn[8] )
         + ( rMat[1][2]*rMat[2][2] ) * ( 3.0f*pIn[6] )
		 );

    outCoefs[6] = (
                ( rMat[2][1]*rMat[2][0] ) * ( pIn[4] )
         +      ( rMat[2][2]*rMat[2][1] ) * ( pIn[5] )
         +      ( rMat[2][0]*rMat[2][2] ) * ( pIn[7] )
         + 0.5f*( rMat[2][0]*rMat[2][0] ) * ( pIn[8])
         + 0.5f*( rMat[2][1]*rMat[2][1] ) * ( -pIn[8])
         + 1.5f*( rMat[2][2]*rMat[2][2] ) * ( pIn[6])
         - 0.5f * ( pIn[6] )
            );

    outCoefs[7] =  (
		   ( rMat[0][0]*rMat[2][1] + rMat[0][1]*rMat[2][0] ) * ( pIn[4] )
         + ( rMat[0][1]*rMat[2][2] + rMat[0][2]*rMat[2][1] ) * ( pIn[5] )
         + ( rMat[0][2]*rMat[2][0] + rMat[0][0]*rMat[2][2] ) * ( pIn[7] )
         + ( rMat[0][0]*rMat[2][0] ) * ( pIn[8] )
         + ( rMat[0][1]*rMat[2][1] ) * ( -pIn[8] )
         + ( rMat[0][2]*rMat[2][2] ) * ( pIn[6] )
		 );

    outCoefs[8] = (
                ( rMat[0][1]*rMat[0][0] -rMat[1][1]*rMat[1][0] ) * ( pIn[4] )
          +     ( rMat[0][2]*rMat[0][1] -rMat[1][2]*rMat[1][1] ) * ( pIn[5] )
          +     ( rMat[0][0]*rMat[0][2] -rMat[1][0]*rMat[1][2] ) * ( pIn[7] )
          +0.5f*( rMat[0][0]*rMat[0][0] -rMat[1][0]*rMat[1][0] ) * ( pIn[8] )
          +0.5f*( rMat[0][1]*rMat[0][1] -rMat[1][1]*rMat[1][1] ) * ( -pIn[8] )
          +0.5f*( rMat[0][2]*rMat[0][2] -rMat[1][2]*rMat[1][2] ) * ( 3.0f*pIn[6] )
		  );
}

// Running total of samples emitted across all output charts (progress logging only).
static unsigned int totalSamplesProcessed = 0;

// Converts one rendered sample (three horizontally-adjacent viewport tiles, one
// per tetrahedron side) into 9 RGB SH coefficients and appends them to the
// current per-chart .radsamples output file, advancing the output chart cursor
// as charts fill up.
//
// bytes             - pointer to the sample's first tile in the RGB8 readback buffer
// cameraMatrixIndex - index into cameraMatrices/flatFlags for this sample's tile slot
void EmitSingleTileInfo(const unsigned char *bytes, int cameraMatrixIndex)
{
	// NOTE(review): most of these locals are only used by the disabled
	// ambient/directed splitting path inside the #if 0 block below.
	int x, y, i;
	int stride = 3 * windowColumns * viewportSize;
	const terReal *maskPtr;
	const terVec4 *directionPtr;
	const unsigned char *rowBytes;
	rradSample_t result;
	__declspec(align(16)) terVec4 ambient, primaryDirection, sampleDirection, sampleAngle, directedLight;
	terReal maskVal;
	terReal influence;
	terReal fraction;
	terMat3x3 mat;
	terVec3 flip(1, 1, -1);

	__m128 redCoefs[3];
	__m128 greenCoefs[3];
	__m128 blueCoefs[3];

	bool flat = flatFlags[cameraMatrixIndex];

	// Accumulate SH coefficients per color channel across the three side tiles.
	// The +0/+1/+2 byte offsets select the R/G/B channel within the RGB8 data;
	// +viewportSize*3 steps to the next tile within the same row.
	InitializeSHCoefs(redCoefs);
	AccumulateSHCoefsFast(bytes, redCoefs, viewportSize, stride, 0, flat);
	AccumulateSHCoefsFast(bytes+viewportSize*3, redCoefs, viewportSize, stride, 1, flat);
	AccumulateSHCoefsFast(bytes+viewportSize*3*2, redCoefs, viewportSize, stride, 2, flat);

	InitializeSHCoefs(greenCoefs);
	AccumulateSHCoefsFast(bytes+1, greenCoefs, viewportSize, stride, 0, flat);
	AccumulateSHCoefsFast(bytes+1+viewportSize*3, greenCoefs, viewportSize, stride, 1, flat);
	AccumulateSHCoefsFast(bytes+1+viewportSize*3*2, greenCoefs, viewportSize, stride, 2, flat);

	InitializeSHCoefs(blueCoefs);
	AccumulateSHCoefsFast(bytes+2, blueCoefs, viewportSize, stride, 0, flat);
	AccumulateSHCoefsFast(bytes+2+viewportSize*3, blueCoefs, viewportSize, stride, 1, flat);
	AccumulateSHCoefsFast(bytes+2+viewportSize*3*2, blueCoefs, viewportSize, stride, 2, flat);

	float redCoefsFinal[9];
	float greenCoefsFinal[9];
	float blueCoefsFinal[9];

	// Rotate the camera-space coefficients into world space
	RotateCoefsByMatrix(redCoefsFinal, redCoefs, cameraMatrices[cameraMatrixIndex]);
	RotateCoefsByMatrix(greenCoefsFinal, greenCoefs, cameraMatrices[cameraMatrixIndex]);
	RotateCoefsByMatrix(blueCoefsFinal, blueCoefs, cameraMatrices[cameraMatrixIndex]);

	if(outputChartIndex == chartSampleCounts.Count())
		return;		// Ran out of files (this should only ever happen once)

	// Advance to the next output chart whenever the current one has received
	// all of its samples (or when no chart has been started yet)
	while(outputChartIndex == -1 || outputSampleIndex == chartSampleCounts[outputChartIndex])
	{
		if(radSampleFile != NULL)
		{
			delete radSampleFile;
			radSampleFile = NULL;
		}

		outputChartIndex++;
		outputSampleIndex = 0;
		if(outputChartIndex == chartSampleCounts.Count())
			return;		// Ran out of files (this should only ever happen once)
	}

	// Lazily open the output stream for the current chart
	if(radSampleFile == NULL)
	{
		radSampleFile = rfs->OpenUnprotectedWrite(mapBasePath + "_photons/chart" + terString(outputChartIndex) + ".radsamples");
		if(!radSampleFile)
		{
			terFPrintf(stderr, "ERROR: Could not open radiosity sample file.");
			exit(1);
		}

		printf("Started output chart %i at sample %i\n", outputChartIndex, totalSamplesProcessed);

	}

	outputSampleIndex++;
	totalSamplesProcessed++;

	// Write the 9 RGB coefficients, normalized by the total angle-correction weight
	for(i=0;i<9;i++)
	{
		terVec3 finalCoef = terVec3(redCoefsFinal[i], greenCoefsFinal[i], blueCoefsFinal[i]) / tetraTotal;
		radSampleFile->Write(&finalCoef, sizeof(finalCoef));
	}

	// Disabled legacy path: ambient/primary-direction light splitting
#if 0

	mat = cameraMatrices[cameraMatrixIndex];

	// Calculate the primary direction
	primaryDirection = terVec4(0, 0, 0, 0);

	directionPtr = incomingAngle;
	ambient = terVec4(0, 0, 0, 0);

	AccumulateDirectionFast(&primaryDirection, bytes, directionPtr, viewportSize, stride);

	/*
	for(y=0;y<viewportSize;y++)
	{
		rowBytes = bytes + stride * y;
		AccumulateDirectionFast(&primaryDirection, rowBytes, directionPtr, viewportSize);

		rowBytes += 3*viewportSize;
		directionPtr += viewportSize;
	}
	*/

	//if(primaryDirection.Vec3() == terVec3(0, 0, 0))
	//	terPrintf("WARNING: Sample with no primary direction!\n");

	primaryDirection = terVec4(primaryDirection.Vec3().Normalize(), 0.0);

	maskPtr = influenceMask;
	directionPtr = normalizedIncomingAngle;

	ambient = terVec4(0, 0, 0, 0);
	directedLight = terVec4(0, 0, 0, 0);

	SplitLightFast(&ambient, &directedLight, maskPtr, directionPtr, primaryDirection, bytes, viewportSize, stride);
	/*
	for(y=0;y<viewportSize;y++)
	{
		rowBytes = bytes + stride * y;
		for(x=0;x<viewportSize;x++)
		{
			maskVal = *maskPtr;
			sampleAngle = *directionPtr;

			

			terVec4 sampleLight = terVec4(rowBytes[0], rowBytes[1], rowBytes[2], 0.0) * maskVal;

			// Find what fraction of this light will be ambient
			// and what will be directed
			fraction = sampleAngle.DotProduct(primaryDirection);

			if(fraction < 0.0f)
				fraction = 0.0f;

			// Split into ambient and directed components
			ambient += sampleLight * (1.0 - fraction);
			directedLight += sampleLight * fraction;

			rowBytes += 3;
			maskPtr++;
			directionPtr++;
		}
	}
	*/

	// Transform out of camera space
	primaryDirection = terVec4((flip * primaryDirection.Vec3()) % mat, 0.0);

	samplesOut++;
	for(i=0;i<3;i++)
	{
		result.ambientLight[i] = ambient[i];
		result.directedLight[i] = directedLight[i];
		result.normal[i] = primaryDirection[i];
	}
	radSampleFile->Write(&result, sizeof(result));
#endif
}

// Walks the readback buffer tile-group by tile-group (3 tiles per sample, one
// per tetrahedron side) and emits each sample via EmitSingleTileInfo.
//
// bytes    - full RGB8 framebuffer readback
// maxTiles - tile budget; decremented by 3 per emitted sample
//
// NOTE(review): the maxTiles == 0 check happens AFTER emitting, and maxTiles
// is unsigned, so a value that is not a multiple of 3 would wrap around and
// never hit zero. In practice the only live caller passes
// windowColumns * windowRows, which is a multiple of 3 — confirm before reuse.
void EmitRadiosityInfo(const unsigned char *bytes, unsigned int maxTiles)
{
	int x, y, n;
	int stride = 3 * windowColumns * viewportSize;
	int fullRowStride = stride * viewportSize;

	n = 0;
	for(y=0;y<windowRows;y++)
	{
		for(x=0;x<windowColumns;x+=3)
		{
			EmitSingleTileInfo(bytes + fullRowStride*y + 3*viewportSize*x, n++);
			if(maxTiles == 0)
				return;
			maxTiles-=3;
			n += 2;		// skip the other two side-tiles of this sample
			sampleNum++;
		}
	}
}

// Precomputes the per-pixel SH projection weights used by AccumulateSHCoefsFast
// for the three tetrahedron-aligned camera views (tetraSHCoefs), plus a "flat"
// single-direction variant (flatSHCoefs), and accumulates the total
// angle-correction weight into tetraTotal for later normalization.
//
// (Cleanup: removed a number of unused locals from the previous version —
// totalSamples, lambert/sh/adjustedContribution, the lambert*Coef constants,
// sqrt3/rsqrt3/sqrt3quad, and a dead __m128 test load. All live math is unchanged.)
void ComputeTetraAdjusts()
{
	float val5 = sqrt(1.0 / 3.0);
	float val7 = 0.78867513f;
	float val2 = 1.0f - val7;

	terVec3 rowVals7n25(val7, -val2, val5);
	terVec3 rowValsn275(-val2, val7, val5);
	terVec3 rowValsn5n55(-val5, -val5, val5);

	// Rotations aligning the sampling frustum with each tetrahedron face
	terMat3x3 adjustMatrices[3];
	
	adjustMatrices[0] = terMat3x3(rowVals7n25, rowValsn275, rowValsn5n55);
	adjustMatrices[1] = terMat3x3(rowValsn5n55, rowVals7n25, rowValsn275);
	adjustMatrices[2] = terMat3x3(rowValsn275, rowValsn5n55, rowVals7n25);

	tetraTotal = 0.0f;

	for(int side=0;side<3;side++)
	{
		// 3 terVec4s per pixel: 12 packed floats, of which 9 are SH weights
		tetraSHCoefs[side].Alloc(viewportSize*viewportSize*3);
		flatSHCoefs[side].Alloc(viewportSize*viewportSize*3);
		for(int y=0;y<viewportSize;y++)
		{
			float yt = 1.0f - (((terReal)y + 0.5) / (terReal)viewportSize * 3.0f);

			for(int x=0;x<viewportSize;x++)
			{
				float xt = ((terReal)x + 0.5) / (terReal)viewportSize * 3.0f - 2.0f;

				terVec3 baseCoord(xt, yt, 1.0f);
				terVec3 coord = (baseCoord % adjustMatrices[side]).Normalize();

				// The warp compensation here's a strange thing
				// Hugo Elias's writeup claims that the warp compensation should be the cosine of the angle from the pixel
				// to the camera, which would be coord[2]
				//
				// On a curved manifold, attempting to compute the angle space taken up by a pixel would be something like:
				// Warp compensation = arctan(t / minDist)'
				// which works out to be a[2]*a[2]
				//
				// However, double-checking it against a Monte Carlo estimator on a hemisphere manifold
				// resolves it to a[2]*a[2]*a[2]
				// The last a[2] appears to be lambert...?
				float angleCorrection = baseCoord.Normalize()[2];

				// Calculate SH coefs
				terVec4 *output = tetraSHCoefs[side] + (y*viewportSize + x)*3;
				terVec4 *flatOutput = flatSHCoefs[side] + (y*viewportSize + x)*3;

				if(baseCoord[2] <= 0.0f)
				{
					// Pixel lies behind the tetrahedron face: contributes nothing
					output[0] = output[1] = output[2] = terVec4(0.0f);
					flatOutput[0] = flatOutput[1] = flatOutput[2] = terVec4(0.0f);
				}
				else
				{
					terVec3 rCoord = coord * terVec3(1.0f, -1.0f, -1.0f);
					// Warp correction (see the comment above: cubed cosine)
					angleCorrection *= angleCorrection*angleCorrection;

					// Bands 0-1: ambient + linear terms (y/z/x order)
					output[0] = terVec4(SH_AMBIENT_FACTOR,
						SH_LINEAR_FACTOR*rCoord[1],
						SH_LINEAR_FACTOR*rCoord[2],
						SH_LINEAR_FACTOR*rCoord[0]) * angleCorrection;

					// Band 2: first four quadratic terms
					output[1] = terVec4(
						3.0f*rCoord[0]*rCoord[1],
						3.0f*rCoord[2]*rCoord[1],
						1.5f*(rCoord[2]*rCoord[2] -  0.5f),
						3.0f*rCoord[2]*rCoord[0]) * angleCorrection * SH_QUADRATIC_FACTOR;

					// Band 2: final quadratic term (remaining lanes are padding)
					output[2] = terVec4(SH_QUADRATIC_FACTOR * angleCorrection * 
						3.0f*(rCoord[0]*rCoord[0] - rCoord[1]*rCoord[1])*0.5f,
						0.0f, 0.0f, 0.0f);
					
					// Flat variant: just the straight-ahead cosine weight
					flatOutput[0] = terVec4(-rCoord[2], 0.0f, 0.0f, 0.0f);
					flatOutput[1] = flatOutput[2] = terVec4(0.0f);

					tetraTotal += angleCorrection;
				}
			}
		}
	}
}

// This compensates for the varying angle coverage of each pixel
void ComputeInfluenceMask()
{
	int i, x, y;
	terReal frustumHalfWidth;
	terReal minDist;
	terReal maxSideOffset;
	terReal t, o;
	terReal lastO, angleDist, totalCoverage;
	terReal xt, yt;
	terVec4 *angleResult;
	terVec4 *nAngleResult;
	terReal *influenceResult;
	terVec3 a;
	terReal angleInfluence;
	terReal angleAdj;

	terVector<terReal> angleAdjust;

	frustumHalfWidth = terAngleDegrees(samplingFOV * 0.5f).Tan();

	angleAdjust.Alloc(viewportSize);

	o = 0.0f;

	minDist = terAngleDegrees(samplingFOV * 0.5f).Cos();
	maxSideOffset = terAngleDegrees(samplingFOV * 0.5f).Sin();

	// Compute compensation for pixel angle warp
	// This is normally
	for(i=0;i<=viewportSize;i++)
	{
		lastO = o;

		// See where this point is along the frustum
		t = ((terReal)i / (terReal)viewportSize * 2.0f - 1.0f) * maxSideOffset;

		o = atan2(t, minDist);

		if(!i)
			continue;

		// Save the angle difference
		angleDist = o - lastO;

		if(useSphericalAdjust)
			angleAdjust[i-1] = angleDist;
		else
			angleAdjust[i-1] = 1;
	}

	influenceMask.Alloc(viewportSize * viewportSize);
	incomingAngle.Alloc(viewportSize * viewportSize);
	normalizedIncomingAngle.Alloc(viewportSize * viewportSize);
	sampleAngleStorage.Alloc(viewportSize * viewportSize);

	angleResult = incomingAngle;
	nAngleResult = normalizedIncomingAngle;
	influenceResult = influenceMask;

	totalCoverage = 0.0f;


	for(y=0;y<viewportSize;y++)
	{
		yt = (((terReal)y + 0.5) / (terReal)viewportSize * 2.0f - 1.0f) * maxSideOffset;
		yt *= frustumHalfWidth;
		for(x=0;x<viewportSize;x++)
		{
			xt = (((terReal)x + 0.5) / (terReal)viewportSize * 2.0f - 1.0f) * maxSideOffset;
			xt *= frustumHalfWidth;

			// Project this on to a sphere
			a = terVec3(xt, yt, minDist).Normalize();

			// The warp compensation here's a strange thing
			// Hugo Elias's writeup claims that the warp compensation should be the cosine of the angle from the pixel
			// to the camera, which would be a[2]
			//
			// On a linear manifold, attempting to compute the angle space taken up by a pixel would be something like:
			// Warp compensation = arctan(t / minDist)'
			// (where t = -1..1) works out to be a[2]*a[2]
			//
			// However, double-checking it against a Monte Carlo estimator resolves it to a[2]*a[2]*a[2]
			// Last *a[2] is probably Lambert, different seems negligible...
			angleAdj = a[2]*a[2];

			// Angle results aren't weighted by influence to produce sharper results
			*angleResult++ = terVec4(a * angleAdj, 0);
			*nAngleResult++ = terVec4(a, 0);

			if(a[2] < minDist)
				angleInfluence = 0.0f;
			else
			{
				angleInfluence = angleAdj * (a[2] - minDist) / (1.0 - minDist);
				totalCoverage += angleInfluence;
			}
			*influenceResult++ = angleInfluence;
		}
	}

	// Rescale everything by the total influence
	influenceResult = influenceMask;
	for(y=0;y<viewportSize;y++)
	{
		for(x=0;x<viewportSize;x++)
		{
			*influenceResult = (*influenceResult) / totalCoverage;
			influenceResult++;
		}
	}
}

// Dynamically-resolved GL 1.x entry points. These are fetched through SDL at
// runtime (see ComputeRadiosity) rather than linked statically; the "rrgl"
// prefix avoids clashing with the Mesa gl.h declarations included above.
typedef void (__stdcall * PFNGLENABLEPROC) ( GLenum );
typedef void (__stdcall * PFNGLDEPTHMASKPROC) ( GLboolean flag );
typedef void (__stdcall * PFNGLCLEARPROC) ( GLbitfield );
typedef void (__stdcall * PFNGLCLEARCOLORPROC) ( GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha );
typedef void (__stdcall * PFNGLCLEARDEPTHPROC) ( GLclampd );
typedef void (__stdcall * PFNGLVIEWPORTPROC)( GLint x, GLint y, GLsizei width, GLsizei height );
typedef void (__stdcall * PFNGLREADPIXELSPROC )(GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLvoid *pixels);
typedef void (__stdcall * PFNGLPIXELSTOREIPROC) ( GLenum pname, GLint param );
typedef void (__stdcall * PFNGLFINISHPROC) ( void );

PFNGLENABLEPROC rrglEnable;
PFNGLDEPTHMASKPROC rrglDepthMask;
PFNGLCLEARPROC rrglClear;
PFNGLCLEARCOLORPROC rrglClearColor;
PFNGLCLEARDEPTHPROC rrglClearDepth;
PFNGLVIEWPORTPROC rrglViewport;
PFNGLREADPIXELSPROC rrglReadPixels;
PFNGLPIXELSTOREIPROC rrglPixelStorei;
PFNGLFINISHPROC rrglFinish;



// Main baking loop: streams sample points from the per-chart .radpoints files,
// renders three tetrahedron-aligned views per point into a tiled window, reads
// the framebuffer back once the window fills, and feeds the pixels to
// EmitRadiosityInfo which writes the .radsamples output.
void ComputeRadiosity()
{
	terReal yaw, pitch;
	rradPoint_t radPoint;
	rradSample_t radSample;
	terRotation pitchRotation;
	terRotation yawRotation;
	terRotation cameraRotation;
	terVec3 randomNormal;
	terVec3 cameraPosition, testNormal;
	SDL_Event e;
	terVector<unsigned char> result;
	int row, column;
	int i;

	Progress_Init();

	// Resolve the GL entry points we need through SDL
	rrglClear = (PFNGLCLEARPROC) SDL_GL_GetProcAddress("glClear");
	rrglEnable = (PFNGLENABLEPROC) SDL_GL_GetProcAddress("glEnable");
	rrglDepthMask = (PFNGLDEPTHMASKPROC) SDL_GL_GetProcAddress("glDepthMask");
	rrglClearDepth = (PFNGLCLEARDEPTHPROC) SDL_GL_GetProcAddress("glClearDepth");
	rrglClearColor = (PFNGLCLEARCOLORPROC) SDL_GL_GetProcAddress("glClearColor");
	rrglViewport = (PFNGLVIEWPORTPROC) SDL_GL_GetProcAddress("glViewport");
	rrglReadPixels = (PFNGLREADPIXELSPROC) SDL_GL_GetProcAddress("glReadPixels");
	rrglPixelStorei = (PFNGLPIXELSTOREIPROC) SDL_GL_GetProcAddress("glPixelStorei");
	rrglFinish = (PFNGLFINISHPROC) SDL_GL_GetProcAddress("glFinish");

	// One RGB8 readback buffer covering the whole tiled window
	result.Alloc(viewportSize * windowColumns * viewportSize * windowRows * 3);

	rrglPixelStorei(GL_UNPACK_ALIGNMENT, 1);
	rrglClearDepth( 1.0f );
	rrglClearColor( 0, 0, 0, 0 );

	row = 0;
	column = -1;

	// Get a good framebuffer so we don't get garbage captures
	for(i=0;i<3;i++)
	{
		rrglDepthMask(GL_TRUE);
		rrglClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT);
		SDL_GL_SwapBuffers();
	}

	terBRPic occluderPic;

	occluderPic.LinkToScene(rscene);
	occluderPic.SetMaterial(roccluderMaterial);

	Progress_SetMessage("Accumulating global ambient light...");
	Progress_SetLoad(projSettings.numCharts);

	unsigned int inputChartIndex = 0;

	int emptyLoads = 0;
	while(true)
	{
		// Open the next input chart when the previous one is exhausted
		if(radPointFile == NULL)
		{
			if(inputChartIndex == projSettings.numCharts)
				return;	// FINISHED!

			radPointFile = rfs->Open(mapBasePath + "_photons/chart" + terString(inputChartIndex) + ".radpoints");
			if(!radPointFile)
			{
				terFPrintf(stderr, "ERROR: Could not open radiosity point file.");
				exit(1);
			}

			// Found out how many samples need to be processed for this one
			chartSampleCounts[inputChartIndex] = radPointFile->Size() / sizeof(rradPoint_t);

			inputChartIndex++;
			Progress_SetProgress(inputChartIndex);
		}

		samplesIn++;
		radPointFile->Read(&radPoint, sizeof(radPoint));

		if(radPointFile->Tell() == radPointFile->Size())
		{
			//
			delete radPointFile;
			radPointFile = NULL;
		}

		// Change viewport: render this point three times, once per tetrahedron side
		for(int side=0;side<3;side++)
		{
			column++;
			if(column == windowColumns)
			{
				column = 0;
				row++;
			}

			// Window full: read back, emit, and start a fresh frame
			if(row == windowRows)
			{
				row = 0;
				rrglReadPixels(0, 0, viewportSize * windowColumns, viewportSize * windowRows, GL_RGB, GL_UNSIGNED_BYTE, result);	// RGB, UNSIGNED_BYTE
				SDL_GL_SwapBuffers();
				EmitRadiosityInfo(result, windowColumns * windowRows);

				rrglDepthMask(GL_TRUE);
				rrglClearDepth(1.0f);
				rrglClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT);
			}

			rrglViewport(column * viewportSize, row * viewportSize, viewportSize, viewportSize);

			// Compute the pitch and yaw of the camera at this point
			yaw = atan2(radPoint.normal[0], radPoint.normal[2]);
			pitch = -asin(radPoint.normal[1]) + 3.141592654;

			// Compute the camera rotation
			yawRotation = terRotation(terVec3(0, 1, 0), terAngleRadians(yaw));
			pitchRotation = terRotation(terVec3(1, 0, 0), terAngleRadians(pitch));

			cameraRotation = yawRotation % pitchRotation;

			// Sanity check: the reconstructed view direction should match the normal
			testNormal = -cameraRotation.Matrix().Inverse()[2];

			if(testNormal.DotProduct(terVec3(radPoint.normal)) < 0.99)
				terFPrintf(stderr, "WARNING: Singularity detected!   %f %f %f\n", radPoint.normal[0], radPoint.normal[1], radPoint.normal[2]);

			terMat3x3 cameraMatrix = cameraRotation.Matrix();
			cameraMatrices[row * windowColumns + column] = cameraMatrix;
			flatFlags[row * windowColumns + column] = (radPoint.flat != 0);

			// Rotate to align to a tetrahedron side
			{
				terVec3 xVector = cameraMatrix % terVec3(1, 0, 0);
				terVec3 yVector = cameraMatrix % terVec3(0, 1, 0);
				terVec3 zVector = cameraMatrix % terVec3(0, 0, 1);

				terVec3 sideBoundVector = (xVector + yVector).Normalize();

				// 54.7356... degrees = arccos(1/sqrt(3)), the tetrahedral tilt
				cameraRotation = terRotation(sideBoundVector, terAngleDegrees(54.735610317245345684622999669981)) % cameraRotation;
				cameraRotation = terRotation(zVector, terAngleDegrees(120.0f * (float)side)) % cameraRotation;
			}


			// Nudge the camera slightly off the sample point
			cameraPosition = terVec3(radPoint.point);

			rcamera->SetPosition(cameraPosition);
			rcamera->SetRotation(cameraRotation);

			//printf("Should be: %f %f %f\n", radNormal[0], radNormal[1], radNormal[2]);
			//printf("Actually:  %f %f %f\n\n", testNormal[0], testNormal[1], testNormal[2]);

			rvisibility->AcquireVisibility(rcamera);

			// Try drawing; repeat until all assets the draw touched are loaded
			for(;;)
			{
				terVec2 dummy2(0.0f, 0.0f);

				rassetManager->LoadAssets(terAssetUrgencyUnused_k);
				
				runified->FrontEnd()->PumpMaterials();

				rscene->Draw(rcamera);
				rr->DrawPending();

				runified->BackEnd()->Digest();

				occluderPic.QueueSurfaces(dummy2, dummy2, dummy2, dummy2, rr);
				rr->DrawPending();

				if(rassetManager->NumPendingAssets() == 0)
					break;
			}
		}

		// Flush the event queue
		while(SDL_PollEvent(&e))
		{
			if(e.type == SDL_QUIT)
				exit(1);
		}
	}

	// NOTE(review): unreachable — the while(true) loop above only exits via
	// `return`, so this finalization (flushing the last partially-filled
	// window) never runs and those trailing samples are dropped. Confirm
	// whether the loop should `break` here instead of returning.
	terPrintf("Finalizing...\n");

	// Flush any remaining samples
	rrglFinish();
	rrglReadPixels(0, 0, viewportSize * windowColumns, viewportSize * windowRows, GL_RGB, GL_UNSIGNED_BYTE, result);	// RGB, UNSIGNED_BYTE
	EmitRadiosityInfo(result, row * windowColumns + column);

	SDL_GL_SwapBuffers();
}

// Asset pipeline configuration for the bake: bump mapping and YCoCg are off,
// sRGB framebuffer and color correction stay on.
static terAssetConfigTable configTable[] =
{
	{ "useYCoCg", terAssetConfigTypeBoolean_k, false, 0.0f },
	{ "srgbTexture", terAssetConfigTypeBoolean_k, false, 0.0f },
	{ "srgbFramebuffer", terAssetConfigTypeBoolean_k, true, 0.0f },
	{ "bumpMappedTerrain", terAssetConfigTypeBoolean_k, false, 0.0f },
	{ "colorCorrection", terAssetConfigTypeBoolean_k, true, 0.0f },
	{ "noBumpMaps", terAssetConfigTypeBoolean_k, true, 0.0f },
};

// Entry point: parses the command line, initializes SDL/OpenGL/FreeImage and
// the TDP renderer, loads the BSP scene and radiosity project, then runs the
// bake via ComputeRadiosity().
//
// Fixes over the previous version:
//  - The "-far" test was a bare `if` instead of `else if`, so after consuming
//    "-near <dist>" as the final arguments the code dereferenced argv[0] one
//    past the end of the (shifted) argument array — strcmp on NULL.
//  - getcwd(NULL, 0) returns a malloc'd buffer; it is now freed after copying.
extern "C" int main(int argc, char *argv[])
{
	SDL_Surface *surf;
	rradGLInitializer init;
	terString baseName;
	terString bspName;
	terString cwd;
	int regenLevel;

	if(argc < 4)
		usage();

	regenLevel = atoi(argv[1]);
	baseName = argv[2];
	bspName = argv[3];

	argc -= 4;
	argv += 4;

	// Option parsing: each iteration consumes exactly one flag (and its value)
	while(argc)
	{
		if(!strcmp(argv[0], "-near"))
		{
			argc--; argv++;
			if(!argc) usage();
			nearplaneDist = atof(argv[0]);
			//printf("Near plane distance is %f\n", nearplaneDist);
			argc--; argv++;
		}
		else if(!strcmp(argv[0], "-far"))
		{
			argc--; argv++;
			if(!argc) usage();
			farplaneDist = atof(argv[0]);
			//printf("Far plane distance is %f\n", farplaneDist);
			argc--; argv++;
		}
		else if(!strcmp(argv[0], "-res"))
		{
			argc--; argv++;
			if(!argc) usage();
			viewportSize = atoi(argv[0]);
			//printf("Viewport resolution is %i\n", viewportSize);
			argc--; argv++;
		}
		else if(!strcmp(argv[0], "-fov"))
		{
			argc--; argv++;
			if(!argc) usage();
			samplingFOV = atof(argv[0]);
			//printf("Sampling field angle is %f\n", samplingFOV);
			argc--; argv++;
		}
		else if(!strcmp(argv[0], "-nosphere"))
		{
			argc--; argv++;
			//printf("Disabled spherical projection adjustment\n");
			useSphericalAdjust = false;
		}
		else
		{
			// Unknown argument: skip it
			argc--;
			argv++;
		}
	}

	// Tile the 640x480 capture window with viewportSize tiles
	windowRows = 480 / viewportSize;
	windowColumns = 640 / viewportSize;

	// Round up to 3 (each sample needs three side-by-side tiles)
	windowColumns += 2;
	windowColumns -= windowColumns % 3;

	cameraMatrices.Alloc(windowRows * windowColumns);
	flatFlags.Alloc(windowRows * windowColumns);

	// getcwd(NULL, 0) allocates; terString copies, so release the buffer
	char *cwdBuf = getcwd(NULL, 0);
	cwd = terString(cwdBuf) + "/";
	free(cwdBuf);

	// Set up libraries
	//printf("[  ] Initializing FreeImage...\r");
	FreeImage_Initialise(1);
	//printf("\r[OK\n");

	//printf("[  ] Loading opengl32.dll...\r");
	glLibInstance = LoadLibrary("opengl32.dll");
	if(!glLibInstance)
	{
		//printf("\nFailed!");
		exit(-1);
	}
	//printf("[OK\n");

	//printf("[  ] Initializing video... ");
	if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_NOPARACHUTE))
		return -1;
	//printf("\r[OK\n");

	//printf("[  ] Initializing OpenGL... ");
	if(SDL_GL_LoadLibrary(NULL))
	{
		SDL_QuitSubSystem(SDL_INIT_VIDEO);
		return -1;
	}
	//printf("\r[OK\n");

	// Enable double buffering
	SDL_GL_SetAttribute (SDL_GL_DOUBLEBUFFER, 1);

	// Set up 2D video
	//printf("[  ] Setting up video... ");
	SDL_GL_SetAttribute (SDL_GL_RED_SIZE, 8);
	SDL_GL_SetAttribute (SDL_GL_GREEN_SIZE, 8);
	SDL_GL_SetAttribute (SDL_GL_BLUE_SIZE, 8);
	SDL_GL_SetAttribute (SDL_GL_DEPTH_SIZE, 24);
	SDL_GL_SetAttribute (SDL_GL_STENCIL_SIZE, 0);

	// Use 4x4 multisample to reduce error cheaply
	SDL_GL_SetAttribute(SDL_GL_MULTISAMPLEBUFFERS, 1);
	SDL_GL_SetAttribute(SDL_GL_MULTISAMPLESAMPLES, 4);

	surf = SDL_SetVideoMode(viewportSize * windowColumns, viewportSize * windowRows, 32, SDL_OPENGL);
	if(!surf)
		return -1;
	//printf("\r[OK\n");

	SDL_WM_SetCaption("Global Illumination", "Global Illumination");

	//printf("[  ] Creating scene... ");
	rvisualizer = new R2::RenderContext();

	rfs = new terCFSFileSystem(cwd.CStr());
	rassetManager = new terMasterAssetManager(rfs, rvisualizer);
	rassetManager->SetRegenerationPriority(regenLevel);

	rassetManager->SetConfigTable(configTable, sizeof(configTable) / sizeof(configTable[0]));

	rr = terNewGLRenderer(&init, rvisualizer);

	// Add handlers
	rassetManager->AddHandler(terRenderer::ClassDef(), rr);
	rassetManager->AddHandler(terMaterialSystemHost::ClassDef(), rvisualizer->MSH());

	runified = new Rendering::RenderSystem::UnifiedRenderer(rvisualizer);

	// Get occluder
	roccluderMaterial = (terMaterialAsset *)rassetManager->GetAsset(terMaterialAsset::ClassDef(), "radoccluder", terAssetUrgencyASAP_k);

	// Set up scene
	rscene = (terBRFastScene *)rvisualizer->NewScene(rassetManager);

	rcamera = new terCamera();

	rmap = new terBRBSPMapAsset(bspName + ".tgeo", terAssetUrgencyASAP_k);
	rassetManager->AddAsset(terBRBSPMapAsset::ClassDef(), rmap);
	rbspModel = new terBRBSPModel();
	rbspModel->SetModel(rmap, 0);

	rvisibility = new terBRBSPVisibility(rmap);
	rbspModel->SetVisibility(rvisibility);

	rscene->AddObject(rbspModel);

	//printf("\r[OK\n");

	terPrintf("Computing global illumination...\n");

	// Fixed FOV/pan chosen so the three tetrahedron-side views tile correctly
	//rcamera->SetHFOV(terAngleDegrees(samplingFOV));
	//rcamera->SetVFOVWithAspectRatio(1);
	rcamera->SetHFOV(terAngleDegrees(112.61986494804042617294901087668));
	rcamera->SetVFOV(terAngleDegrees(112.61986494804042617294901087668));
	rcamera->SetHPan(-1.0f / 6.0f);
	rcamera->SetVPan(1.0f / 6.0f);

	rcamera->SetNearPlane(nearplaneDist);
	rcamera->SetFarPlane(farplaneDist);

	ComputeTetraAdjusts();
	ComputeInfluenceMask();

	// Load settings and set up FS stuff
	{
		terSafePointer<terFileStream> projStream = rfs->Open(baseName + ".radproject");
		projStream->Read(&projSettings, sizeof(projSettings));

		chartSampleCounts.Alloc(projSettings.numCharts);
		mapBasePath = baseName;
	}

	try
	{
		ComputeRadiosity();
	}
	catch(terException &e)
	{
		terFPrintf(stderr, "TDP crashed: %s // %s", e.ErrorMsg(), e.AuxMsg());
		return 1;
	}

	return 0;
}
