#include "Renderer.h"
//#include "../Geometry/Triangle.h"
//#include "../Geometry/Sphere.h"
#include "../Geometry/DynSphere.h"
#include "../Geometry/UVSphere.h"
#include "Sample/Sample.h"
#include "../Basis/Image.h"
#include "../Basis/Vector2.h"
#include "../Basis/rgb.h"
#include "../Camera/Camera.h"
#include "../Texture/MarbleTexture.h"
#include "../Texture/NoiseTexture.h"

#include <fstream>
using namespace std;

//	Default-constructs an empty renderer: all owned objects are created
//	later in init(), so every pointer starts out null.
Renderer::Renderer()
	: m_pImage(nullptr)
	, m_pCamera(nullptr)
	, m_pSampler(nullptr)
{
	m_vShapes.clear();
}

//	Releases everything the renderer owns.  Note that deleting a null
//	pointer is a no-op in C++, so no explicit null checks are needed.
Renderer::~Renderer()
{
	delete m_pImage;
	m_pImage = nullptr;
	delete m_pCamera;
	m_pCamera = nullptr;
	delete m_pSampler;
	m_pSampler = nullptr;
	//	renderLoop() normally deletes the shapes itself, but if it never
	//	ran to completion the vector may still own them — clearing the
	//	vector alone would leak, so delete the elements first
	for (size_t i = 0; i < m_vShapes.size(); ++i)
		delete m_vShapes[i];
	m_vShapes.clear();
}

//	Allocates the output image, the sampler, and a thin-lens camera
//	looking down -z from the origin.  Safe to call more than once:
//	objects from a previous call are released first so nothing leaks.
//	@param width   image width in pixels (also the film-plane width)
//	@param height  image height in pixels (also the film-plane height)
void Renderer::init(int width, int height)
{
	//	release any state from a previous init() so repeated calls don't leak
	delete m_pImage;
	delete m_pSampler;
	delete m_pCamera;

	m_pImage = new Image(width, height);
	m_pSampler = new Sampler();

	//	init the camera with lens
	//Vector3 c, Vector3 gaze, Vector3 vup, float aperture, float left, float right, float bottom, float top, float distance
	Vector3 center(0.0f, 0.0f, 0.0f);
	Vector3 gaze(0.0f, 0.0f, -1.0f);
	Vector3 up(0.0f, 1.0f, 0.0f);
	//	a large aperture relative to the focus distance makes the depth
	//	of field effect obvious.
	//	NOTE(review): the original comment claimed f-number (distance /
	//	aperture) = 3.3, but 900 / 161 ≈ 5.6 — confirm which was intended.
	float aperture = 161.0f;
	float distance = 900.0f;
	m_pCamera = new Camera(center, gaze, up, aperture, -width / 2.0f, width / 2.0f, -height / 2.0f, height / 2.0f, distance);
}

//	Renders the scene once: builds the geometry, shoots nSamples jittered
//	rays per pixel (pixel position, lens position and exposure time are
//	each sampled, matched up by index k), averages the sample colors,
//	gamma-corrects the image and writes it to "simple raytracer.ppm".
void Renderer::renderLoop()
{
	HitRecord rec;
	bool is_a_hit;
	float tmax;

	//	geometry (alternative test scenes kept for reference)
//	m_vShapes.push_back(new DynSphere(Vector3(-50.0f, 50.0f, -900.0f), 50, rgb(.2f, .8f, .2f), 0.0f, 10.0f));
// 	m_vShapes.push_back(new Sphere(Vector3(-50.0f, -50.0f, -1000.0f), 150, rgb(.2f, .2f, .8f)));
// 	m_vShapes.push_back(new Triangle(Vector3(0.0f, 300.0f, -800.0f),
// 		Vector3(-300.0f, -200.0f, -1000.0f),
// 		Vector3(150.0f, -280.0f, -1000.0f),
// 		rgb(.8f, .2f, .2f)));

	//MarbleTexture* tex = new MarbleTexture(0.3f);
	NoiseTexture* tex = new NoiseTexture();
	m_vShapes.push_back(new UVSphere(Vector3(-50.0f, 50.0f, -900.0f), 250, tex));

	int width = m_pImage->getWidth();
	int height = m_pImage->getHeight();
	int nShapes = m_vShapes.size();

	//	number of samples per pixel; one sample set is regenerated per pixel
	int nSamples = 4;
	Vector2 *pixelSamples = new Vector2[nSamples];
	Vector2 *lensSamples = new Vector2[nSamples];
	//	exposure samples should be sampled in the exposure time, not the time indicated in DynSphere
	float *exposureSamples = new float[nSamples];
	rgb color(0.0f, 0.0f, 0.0f);
	rgb bgcolor(0.2f, 0.2f, 0.2f);
	Ray r;
	for (int i = 0; i < width; i++)
	{
		for (int j = 0; j < height; j++)
		{
			//	for each pixel, we generate nSamples samples
			//	and then average the results
			color.setRed(0.0f);
			color.setGreen(0.0f);
			color.setBlue(0.0f);
			m_pSampler->multiJitter(pixelSamples, nSamples);
			m_pSampler->multiJitter(lensSamples, nSamples);
			m_pSampler->jitter(exposureSamples, nSamples);
			for (int k = 0; k < nSamples; k++)
			{
				tmax = 100000.0f;
				is_a_hit = false;
				//	map pixel (i, j) plus its jitter offset into [0, 1) screen space
				r = m_pCamera->getRay((pixelSamples[k].x() + (float)i - 0.5f) / (float)width, 
					(pixelSamples[k].y() + (float)j - 0.5f) / (float)height, 
					lensSamples[k].x(), lensSamples[k].y());

				//	find the closest hit by shrinking tmax as hits are found
				for (int s = 0; s < nShapes; s++)
				{
					if (m_vShapes[s]->hit(r, .00001f, tmax, exposureSamples[k], rec))
					{
						tmax = rec.t;
						is_a_hit = true;
					}
				}

				if (is_a_hit)
					color += rec.hit_tex->value(rec.uv, rec.hit_p);
				else
					color += bgcolor;
			}
			color /= (float)nSamples;
			m_pImage->set(i, j, color);
		}
	}
	m_pImage->gammaCorrect(2.2f);

	//	FIX: these buffers were allocated with new[], so they must be
	//	released with delete[] — plain delete on an array is undefined behavior
	delete[] pixelSamples;
	delete[] lensSamples;
	delete[] exposureSamples;
	//	FIX: destroy the shapes before the texture they point at, so no
	//	shape ever holds a dangling texture pointer during destruction
	for (int i = 0; i < (int)m_vShapes.size(); i++)
		delete m_vShapes[i];
	m_vShapes.clear();
	delete tex;
	ofstream fout("simple raytracer.ppm");
	m_pImage->writePPM(fout);
	fout.close();
}

//	Non-owning accessor for the rendered image; null until init() has run.
Image* Renderer::getImage()
{
	return this->m_pImage;
}

//	Non-owning accessor for the camera; null until init() has run.
Camera* Renderer::getCamera()
{
	return this->m_pCamera;
}