#include "stdafx.h"
#include "Camera.h"
#include "Matrix.h"
#include "MyMath.h"

namespace FIRSTDX
{

	/// Default camera: at the world origin, at rest, looking down -Z.
	/// m_drag is the fraction of velocity lost each updatePos() call.
	Camera::Camera():m_pos(0,0,0),m_velocity(0,0,0),m_rotation(0,0,0),m_drag(0.1f),
		m_direction(0,0,-1),m_scale(0.2f,0.2f,0.2f),
		m_const_offsetValue(0,0,0),m_targetRotation(0,0,0)
	{
		// Fix: these members were previously left uninitialized, but
		// moveTo() reads m_offset and may be called before the first
		// applyImpulse() populates it. Assigned in the body (not the
		// init list) so the member declaration order in the header
		// does not matter.
		m_offset = vec3f(0,0,0);
		m_up = vec3f(0,1,0); // world up until the first impulse rotates it
	}
	const vec3f& Camera::position() const {return m_pos;}

	/// Teleport the camera to an absolute world-space position.
	void Camera::setPosition(const vec3f& position) { m_pos = position; }
	void Camera::applyImpulse(const vec3f& impulse)
	{
		

		Matrix3f rotationMatrix;
		rotationMatrix.fromEulerAngles( m_rotation.x * PI /180, m_rotation.y * PI /180, m_rotation.z * PI /180 );
		vec3f worldSpaceImpulse = rotationMatrix * impulse ;
		m_velocity += worldSpaceImpulse;
	// camera pos controlled by the model pos now 
		m_offset = rotationMatrix * m_const_offsetValue;
		m_direction = rotationMatrix * vec3f(0,0,-1); 
		// vec3f(0,0,-1) is camera basis, not world basis, this has something to do with the view matrix because view matrix is camera basis
		m_up = rotationMatrix * vec3f(0,1,0);
		//same camera basis
	}

	// Set the camera velocity directly from a world-space impulse.
	// NOTE(review): unlike applyImpulse(), this OVERWRITES m_velocity
	// instead of accumulating into it — confirm that is intentional.
	void Camera::applyImpulseWorldBasis(const vec3f& impulse)
	{
		m_velocity = impulse;
	}
	/// Euler rotation in degrees (x = pitch, y = yaw, z = roll).
	const vec3f& Camera::getRotation() { return m_rotation; }
	const vec3f& Camera::setRotation(const vec3f& rotation){return m_rotation = rotation;}
	/// World-space forward vector, kept in sync by applyImpulse().
	const vec3f& Camera::getDirection() { return m_direction; }
	/// Third-person rotation: add an Euler delta (degrees), keeping the
	/// pitch inside [-90, 90] so the camera cannot flip over the poles.
	void Camera::rotate(const vec3f& change)
	{
		m_rotation += change;
		const float clampedPitch = clamp<float>(m_rotation.x, -90.0f, 90.0f);
		m_rotation.x = clampedPitch;
	}


	void Camera::setViewTransformation()
	{
		initializeView();
		Matrix4f t,tpos,ry,rx,rz;
		t = Matrix4f(1,0.0f,0.0f,0.0f, 
			0.0f,1,0.0f,0.0f,
			0.0f,0.0f,1,0.0f,
			m_const_offsetValue[0],m_const_offsetValue[1],m_const_offsetValue[2],1);

		tpos = Matrix4f(1,0.0f,0.0f,0.0f, 
			0.0f,1,0.0f,0.0f,
			0.0f,0.0f,1,0.0f,
			m_pos.x,m_pos.y ,m_pos.z  ,1);
	 
		float angle =  m_rotation.y / 180.0f * PI;
		ry = Matrix4f(cos(angle),0.0f,sin(angle),0.0f, 
			0.0f,1,0.0f,0.0f,
			-sin(angle),0.0f,cos(angle),0.0f,
			0.0f,0.0f,0.0f,1.0f);

		angle =  m_rotation.x / 180.0f * PI;
		rx = Matrix4f(1,0.0f,0.0f,0.0f, 
			0.0f,cos(angle),-sin(angle),0.0f,
			0.0f,sin(angle),cos(angle),0.0f,
			0.0f,0.0f,0.0f,1.0f);

		angle =  m_rotation.z / 180.0f * PI;
		rz = Matrix4f(cos(angle),-sin(angle),0.0f,0.0f, 
			sin(angle),cos(angle),0.0f,0.0f,
			0.0f,0.0f,1.0f,0.0f,
			0.0f,0.0f,0.0f,1.0f);

		
 
		m_MV_matrix =    ( /* t **/ (rx * ry * rz ) * tpos  )  * m_view;
	  
		// glGetFloatv (GL_MODELVIEW_MATRIX, m_MV_matrix.getDataPointer());
		//trace("cam MV "<<m_MV_matrix[12]<<" "<<m_MV_matrix[13]<<" "<<m_MV_matrix[14]<<std::endl);

	}


	/// Relative uniform zoom: grow (scaleValue > 0) or shrink
	/// (scaleValue < 0) the camera scale on all three axes.
	void Camera::scale( const float scaleValue )
	{
		const float factor = 1 + scaleValue;
		m_scale *= factor;
	}

	/// Chase-camera update: ease the camera 10% of the way towards the
	/// tracked model position plus the rotated third-person offset.
	/// NOTE(review): modelPos is never modified — could be const&, but
	/// the signature must stay in sync with the header declaration.
	void Camera::moveTo( vec3f& modelPos )
	{
		const vec3f target = modelPos + m_offset;
		m_pos = Lerp<vec3f,float>(m_pos, target, 0.1f);
	}

	void Camera::initializeView()
	{		 
 
		vec3f vz= -m_direction;
		vz.normalize();
		vec3f up = vec3f(0,1,0);
		vec3f vx =  up.crossProduct(vz);
		vx.normalize();

		// vy doesn't need to be normalized because it's a cross
		// product of 2 normalized vectors
		vec3f vy = vz.crossProduct(vx);
		Matrix4f inverseViewMatrix = Matrix4f(vx[0],vx[1],vx[2],0,
			vy[0],vy[1],vy[2],0,
			vz[0],vz[1],vz[2],0,
			m_pos[0],m_pos[1],m_pos[2],1);
		m_view = inverseViewMatrix.inverse();
		return;
 
#if 0
		//////////////////////////////////////////////////
		Matrix4f Tc(1,0,0,0,
					0,1,0,0,
					0,0,1,0,
					m_pos[0],m_pos[1],m_pos[2],1);//camera pos

		//Camera orientation (basis) in the world. 
		//Note that using the canonical space rotation about X matrix (row-major!) works out.
 
		vec3f kc = -m_direction;
		kc.normalize();
 
 
		vec3f j = vec3f(0,1,0);//up
	 
		vec3f ic = j.crossProduct(kc);
		
		ic.normalize();

		vec3f jc = kc.crossProduct(ic);

		Matrix4f Rc= Matrix4f(ic[0],ic[1],ic[2],0,
			jc[0],jc[1],jc[2],0,
			kc[0],kc[1],kc[2],0,
			0,0,0,1 );

		//Rc*Tc is Camera->World. We need World->Camera (its inverse)
		//Inverse is transpose for orthogonal
		 
		Rc = Rc.inverse();
		// Rc.transpose();
		//Inverse for translation is negation
		Tc = Tc.inverse();
		//or do this:
//     		Tc.getDataPointer()[12] = -Tc[12];
//     		Tc.getDataPointer()[13] = -Tc[13];
//     		Tc.getDataPointer()[14] = -Tc[14];
		// (Rc*Tc).inverse = Tc.inverse * Rc.transpose
 
		m_view = Tc * Rc; 
#endif		 
	}

	/// Build a left-handed, D3D-style perspective projection (depth
	/// mapped to [0,1]) and store it in m_projection.
	///
	/// @param fov    vertical field of view in DEGREES
	/// @param aspect viewport width / height
	/// @param znear  near clip distance (> 0)
	/// @param zfar   far clip distance (> znear)
	/// @return the projection matrix just stored
	Matrix4f Camera::setProjection(float fov, float aspect,
		float znear, float zfar)
	{
		// cot(fov/2): PI_OVER_360 converts full-angle degrees into
		// half-angle radians, and 1/tan(x) == cot(x).
		const float h = (float)1/tan(fov * PI_OVER_360);
		const float w = h/aspect;
		const float Q = zfar/(zfar - znear);

		m_projection = Matrix4f(w,0,0,0,
			0,h,0,0,
			0,0,Q,1,
			0,0,-Q*znear,0);
		return m_projection;
	}

	// Per-frame integration: apply drag, then advance the position by
	// the damped velocity. Statement order matters — drag is applied
	// first, so this frame already moves at the damped speed.
	void Camera::updatePos()
	{
		m_velocity -= m_velocity * m_drag;
		m_pos += m_velocity;
	}





}