#include "StdAfx.h"
#include "CREParticle.h"
#include "FrameProfiler.h"
#include "HeapContainer.h"

#include <iterator>
#include <I3DEngine.h>
#include <IParticles.h>

// Frame-smoothed per-emitter pixel budget for fill-rate limiting.
// Written by CElemList::ComputeMaxPixels, read by the IAllocRender ctors below.
static float s_fMaxPixels = 0.f;

// Pool-backed intrusive singly-linked list of render elements (T).
// Used nodes form one list (head/tail); recycled nodes sit on a free list.
// clear() recycles all used nodes in O(1); free() destroys everything.
// Not thread-safe (allocator uses PSyncNone); one instance per render list.
template<class T>
class CElemList
{
public:
	CElemList()
	{
		m_ListHead = m_ListTail = 0;
		m_Free = 0;
		m_UsedCount = 0;
	}

	~CElemList()
	{
		free();
	}

	// Destroy all nodes (used and free) and return pool pages to the system.
	void free()
	{
		// Move all to free list.
		clear();

		// Delete all free nodes.
		Node* node = m_Free;
		m_Free = 0;

		// Capture 'next' before Delete(): reading node->next after the node
		// has been destroyed and returned to the pool is a use-after-free.
		while (node)
		{
			Node* nodeNext = node->next;
			m_allocator.Delete(node);
			node = nodeNext;
		}
		m_allocator.FreeMemory();
	}

	// Recycle all used nodes onto the free list; O(1), no destructors run.
	void clear()
	{
		// Transfer all used nodes to free list.
		if (m_ListTail)
		{
			m_ListTail->next = m_Free;
			m_Free = m_ListHead;
		}
		m_ListHead = m_ListTail = 0;
		m_UsedCount = 0;
	}

	// Return a T initialized with (a1, a2), appended at the tail of the used
	// list. Recycled nodes are Reset() rather than re-constructed.
	template <typename A1, typename A2>
	T* allocate_item(const A1& a1, const A2& a2)
	{
		Node* node;
		if (m_Free)
		{
			node = m_Free;
			m_Free = m_Free->next;
			node->Reset(a1, a2);
		}
		else
		{
			// NOTE: constructs a T in Node-sized pool storage, then downcasts.
			// Node adds only the trailing 'next' pointer, assigned just below.
			node = static_cast<Node*>( new (m_allocator.Allocate()) T(a1, a2) );
		}
		node->next = 0;

		// Append to tail of the used list.
		if (m_ListTail)
			m_ListTail->next = node;
		else
			m_ListHead = node;
		m_ListTail = node;
		m_UsedCount++;

		return node;
	}

	// Tracks pixel coverage distribution for each frame, for fill-rate limiting.
	void ComputeMaxPixels()
	{
		FUNCTION_PROFILER_SYS(PARTICLE);

		// Find per-container maximum which will not exceed total.		
		// don't use static here, this function can be called before particle cvars are registered
		ICVar* pVar = gEnv->pConsole->GetCVar("e_ParticlesMaxScreenFill");
		if (!pVar)
			return;
		float fMaxTotalPixels = pVar->GetFVal() * gRenDev->GetWidth() * gRenDev->GetHeight();
		float fNewMax = fMaxTotalPixels;
		
		PREFAST_SUPPRESS_WARNING(6255) // _alloca indicates failure by raising a stack overflow exception. Consider using _malloca instead
		Array<float> afPixels((float*)alloca(m_UsedCount * sizeof(float)), m_UsedCount);
		float fTotalPixels = 0.f;

		int i = 0;
		for (Node* node = m_ListHead; node; node = node->next)
		{
			afPixels[i++] = node->GetPixelCount();
			fTotalPixels += node->GetPixelCount();
		}

		if (fTotalPixels > fMaxTotalPixels)
		{
			std::sort( afPixels.begin(), afPixels.end() );

			// Compute max pixels we can have per emitter before total exceeded, for this frame's data.
			// Walk emitters smallest-first; once granting afPixels[i] to every
			// remaining emitter would bust the budget, split the remainder evenly.
			float fUnclampedTotal = 0.f;
			for_array (i, afPixels)
			{
				float fTotal = fUnclampedTotal + (afPixels.size() - i) * afPixels[i];
				if (fTotal > fMaxTotalPixels)
				{
					fNewMax = (fMaxTotalPixels - fUnclampedTotal) / (afPixels.size() - i);
					break;
				}
				fUnclampedTotal += afPixels[i];
			}
		}

		// Update current value gradually (change limited to +/-50% per update).
		float fMaxChange = fNewMax * 0.5f;
		s_fMaxPixels = clamp_tpl(fNewMax, s_fMaxPixels - fMaxChange, s_fMaxPixels + fMaxChange);
	}

	// Accumulate allocator footprint and active-node usage into mem.
	void GetMemoryUsage(stl::SMemoryUsage& mem)
	{
		mem.nAlloc += m_allocator.GetTotalMemory().nAlloc;
		mem.nUsed += m_UsedCount * sizeof(Node);
	}

private:

	// Intrusive list node: the element itself plus its forward link.
	struct Node: T
	{
		Node* next;
	};

	stl::TPoolAllocator<Node, stl::PSyncNone> m_allocator;
	Node* m_ListHead;		// First used node, or 0.
	Node* m_ListTail;		// Last used node, or 0.
	Node* m_Free;			// Recycled nodes available for reuse.
	size_t m_UsedCount;		// Number of nodes on the used list.
};

//////////////////////////////////////////////////////////////////////////
//
// Data needed for each render list in multi-threaded rendering.
//
// Per-render-list storage: the particle element pool plus the vertex and
// index heaps that element vertex data is allocated from.
struct CRenderListData
{
	CElemList<CREParticle>							m_Elems;
	stl::HeapAllocator<stl::PSyncNone>				m_VertHeap;
	stl::HeapAllocator<stl::PSyncNone>				m_IndsHeap;	

	// 64k pages for the vertex heap, 4k pages for the index heap.
	CRenderListData()
		: m_Elems()
		, m_VertHeap(64*1024)
		, m_IndsHeap(4*1024)
	{}

	// End of frame: update the fill-rate budget from this frame's elements,
	// then recycle all elements and heap pages for reuse next frame.
	void FinishScene()
	{
		m_Elems.ComputeMaxPixels();
		m_Elems.clear();
		m_IndsHeap.Reset();
		m_VertHeap.Reset();
	}

	// Full teardown: release all pooled nodes and heap memory.
	void Reset()
	{
		m_Elems.free();
		m_IndsHeap.Clear();
		m_VertHeap.Clear();
	}

	// Accumulate heap and element-pool statistics into mem.
	void GetMemoryUsage(stl::SMemoryUsage& mem)
	{
		m_Elems.GetMemoryUsage(mem);
		mem += m_VertHeap.GetTotalMemory();
		mem += m_IndsHeap.GetTotalMemory();
	}
};

static CRenderListData g_RenderListData[RT_COMMAND_BUF_COUNT];			// One for each render list.


//////////////////////////////////////////////////////////////////////////
//
// IAllocRender implementations.
//

struct CAllocRenderStore: IAllocRender
{
	CREParticle*	m_pRE;

	CAllocRenderStore(CREParticle* pRE)
		: m_pRE(pRE)
	{
		fMaxPixels = s_fMaxPixels;
	}

	// Set existing SVertices to RE, alloc new ones.
	virtual void Alloc( int nAllocVerts, int nAllocInds = 0 )
	{
		m_pRE->SetVertices( aVertices(), aIndices(), fPixels );

		CRenderListData& Data = g_RenderListData[ gRenDev->m_pRT->CurThreadFill() ];
				
		if (nAllocVerts > aVertices.available())
			aVertices.set( ArrayT( Data.m_VertHeap.NewArray<SVertexParticle>(nAllocVerts, 16), nAllocVerts ) );
		if (nAllocInds > aIndices.available())
			aIndices.set(ArrayT( Data.m_IndsHeap.NewArray<uint16>(nAllocInds), nAllocInds ));
	}	
	
	virtual CREParticle* RenderElement() const 
	{ 
		return m_pRE; 
	}
};

// IAllocRender implementation for the immediate path: writes directly into
// the renderer's current vertex/index streams (used by TransferVertices).
struct CAllocRenderDirect: IAllocRender
{
	CAllocRenderDirect()
	{
		fMaxPixels = s_fMaxPixels;
	}
	
	// Render existing SVertices, alloc new ones.
	// Commits the previously returned ranges into the pipeline counters,
	// then reserves fresh ranges, flushing the batch on overflow.
	virtual void Alloc( int nAllocVerts, int nAllocInds = 0 )
	{
		SRenderPipeline& rp = gRenDev->m_RP;

		// Update pipeline verts based on how many used.
		rp.m_NextStreamPtr.PtrVF_P3F_C4B_T4B_N3F2 += aVertices.size();
		rp.m_RendNumVerts += aVertices.size();
		rp.m_RendNumIndices += aIndices.size();

		int nAllocedVerts = 0, nAllocedInds = 0;
		if (nAllocVerts)
		{
			// Flush and alloc more.
			// NOTE(review): FX_CheckOverflow appears to need at least one
			// render item when it flushes; push a temporary one if the list
			// is empty and retire it afterwards — confirm against FX code.
			bool bUseLocal = rp.m_RIs[0].size() == 0;
			if (bUseLocal)
			{
				SRendItem riTemp;
				riTemp.pObj = rp.m_pCurObject;
				rp.m_RIs[0].AddElem(&riTemp);
			}

			gRenDev->FX_CheckOverflow( nAllocVerts, nAllocInds, rp.m_pRE, &nAllocedVerts, &nAllocedInds );

			if (bUseLocal)
				rp.m_RIs[0].SetUse(0);

			if (nAllocedInds < nAllocInds)
				// Limit vert count when index allocation also truncated.
				// Scale proportionally; 64-bit intermediate avoids overflow.
				nAllocedVerts = min(nAllocedVerts, int((int64)nAllocedInds * nAllocVerts/nAllocInds));
		}
		// Expose the newly reserved stream ranges to the caller.
		aVertices.set(ArrayT( rp.m_NextStreamPtr.PtrVF_P3F_C4B_T4B_N3F2, nAllocedVerts ));
		aIndices.set(ArrayT( rp.m_RendIndices + rp.m_RendNumIndices, nAllocedInds ));
		nBaseVertexIndex = rp.m_RendNumVerts;
	}
};


//////////////////////////////////////////////////////////////////////////
//
// CREParticle implementation.
//

// Construct a particle render element bound to a vertex creator and camera.
CREParticle::CREParticle( IParticleVertexCreator* pVC, const CCamera& cam )
{
	m_pVertexCreator = pVC;
	m_pCamera = &cam;
	m_fPixels = 0.f;
	mfSetType(eDATA_Particle);
}

// Re-bind a recycled render element to a new vertex creator and camera,
// dropping any previously stored vertex data.
void CREParticle::Reset( IParticleVertexCreator* pVC, const CCamera& cam )
{
	// Release references to previously stored vertex/count arrays.
	m_aVerts.set(0, 0);
	m_aVertCounts.set(0, 0);

	m_fPixels = 0.f;
	m_pCamera = &cam;
	m_pVertexCreator = pVC;
}

// Allocate (or recycle) a CREParticle from the given thread's element list
// and immediately compute its vertices via the creator.
CREParticle* 
CREParticle::Create( IParticleVertexCreator* pVC, const CCamera& cam, int threadList )
{
	FUNCTION_PROFILER_SYS(PARTICLE);

	assert(pVC);

	CElemList<CREParticle>& elems = g_RenderListData[threadList].m_Elems;
	CREParticle* pElem = elems.allocate_item(pVC, cam);
	pElem->StoreVertices(true);
	return pElem;
}

CREParticle::~CREParticle()
{
  // Mark the element type invalid; base-class destructor runs afterwards.
  m_Type = eDATA_Unknown;
}

// Forward release to the base implementation; no particle-specific cleanup
// is needed here (element storage is owned by the per-list pools).
void CREParticle::Release(bool bForce)
{
	CRendElement::Release(bForce);
}

// Squared distance from the element's camera to its particle container.
// matInst is unused; the distance comes from the vertex creator.
float 
CREParticle::mfDistanceToCameraSquared( Matrix34& matInst )
{
	// Only valid while the container reference is still alive.
	assert(m_pVertexCreator);
	if (IParticleVertexCreator* pVC = m_pVertexCreator)
		return pVC->GetDistSquared( m_pCamera->GetPosition() );
	return 0.f;
}

// Prepare this element for rendering: wait for the async vertex-computation
// job, then transfer its vertices into the pipeline streams.
// bCheckOverflow is unused here.
void CREParticle::mfPrepare(bool bCheckOverflow)
{
	FUNCTION_PROFILER_SYS(PARTICLE);

	// Already the current render element; nothing to do.
	if (gRenDev->m_RP.m_pRE == this)
		return;

	gRenDev->m_RP.m_CurVFormat = eVF_P3F_C4B_T4B_N3F2;
	
	gRenDev->FX_StartMerging();
	
	// Make sure the vertex-computation job has finished before reading its output.
	gEnv->GetJobManager()->WaitForJob(m_SPUState);
	TransferVertices();

#if defined (DIRECT3D10) && !defined(PS3)
	if (gRenDev->m_RP.m_RendNumVerts)
		gRenDev->m_RP.m_pRE = this;
#else
	gRenDev->m_RP.m_RECustomTexBind[0] = m_CustomTexBind[0];
#endif
}

// Compute this element's vertices via its creator, storing results on the
// element (deferred path). bWait is currently unused in this body.
void 
CREParticle::StoreVertices( bool bWait )
{
	FUNCTION_PROFILER_SYS(PARTICLE);
	// if we use jobs, we can skip locks (other scheduling)	
	CAllocRenderStore alloc(this);
	m_pVertexCreator->ComputeVertices(*m_pCamera, alloc);		
}

// Transfer vertices previously created by StoreVertices into the renderer's
// stream buffers, expanding point data to quads/octagons where the active
// technique has no geometry shader.
void 
CREParticle::TransferVertices() const
{
	FUNCTION_PROFILER_SYS(PARTICLE);

	// This method copies vertices already created elsewhere into the render buffer

	CAllocRenderDirect alloc;

	// Track progress in Array reference objects. These only reference memory, so there is no copying of the elements referred to.
	Array<SVertexParticle> aVerts = m_aVerts;
	Array<uint16> aVertCounts = m_aVertCounts;

	const SShaderTechnique* pTech = gRenDev->m_RP.GetStartTechnique();
	bool bGeomShader = pTech && (pTech->m_Flags & FHF_USE_GEOMETRY_SHADER);

	// Loop until all source vertices are consumed, or the pipeline can take
	// no more this pass (break on zero availability).
	while (!aVerts.empty())
	{
		if (aVertCounts.empty())
		{
			// Point sprites, 1 vertex per particle.
			if (bGeomShader)
			{
				// Just copy vertices for point sprites.
				alloc.Alloc(aVerts.size());
				if (alloc.aVertices.available() == 0)
					break;
				alloc.CopyVertices(aVerts);
			}
			else
			{
				// NOTE(review): st.x == 0 appears to select quad (4-vertex)
				// vs octagonal (8-vertex) expansion — confirm in vertex setup.
				if (!aVerts[0].st.x)
				{
					// Expand to 4 vertices each.
					alloc.Alloc(aVerts.size() * 4, aVerts.size() * 6);
					// How many whole quads fit in the allocated range.
					int nParts = min(aVerts.size(), alloc.aVertices.available() >> 2);
					if (nParts == 0)
						break;
					for (int n = 0; n < nParts; n++)
					{
						alloc.ExpandQuadVertices(aVerts[n]);
					}
					aVerts.erase_front(nParts);
					alloc.SetQuadsIndices();
				}
				else
				{
					// Expand to 8 vertices each.
					alloc.Alloc(aVerts.size() * 8, aVerts.size() * 18);
					// How many whole octagons fit in the allocated range.
					int nParts = min(aVerts.size(), alloc.aVertices.available() >> 3);
					if (nParts == 0)
						break;
					for (int n = 0; n < nParts; n++)
					{
						alloc.ExpandOctVertices(aVerts[n]);
					}
					aVerts.erase_front(nParts);
					alloc.SetOctsIndices();
				}
			}
		}
		else
		{
			// Variable vertex count.
			assert(!bGeomShader);
			alloc.Alloc(aVerts.size(), aVerts.size() * 3);
			if (alloc.aVertices.available() < aVertCounts[0])
				// No room for any polygons
				break;
			// Presumably advances aVerts/aVertCounts as polygons are emitted;
			// otherwise this loop would not terminate — verify in SetPoliesIndices.
			alloc.SetPoliesIndices(aVerts, aVertCounts);
		}
	}

	// Final Alloc(0) commits the last reserved range to the pipeline counters.
	alloc.Alloc(0);
}

//////////////////////////////////////////////////////////////////////////
//
// CRenderer particle functions implementation.
//

// End-of-frame hook: finalize the fill thread's render list (recompute the
// fill-rate budget and recycle element/heap storage for next frame).
void 
CRenderer::ClearComputeVerticesQueue()
{
	g_RenderListData[gRenDev->m_pRT->CurThreadFill()].FinishScene();
}

void
CRenderer::EF_CleanupParticles()
{
	if (gEnv->p3DEngine)
		gEnv->p3DEngine->GetParticleManager()->SyncComputeVerticesJobQueues();

	for (int i = 0; i < 2; ++i)
		g_RenderListData[i].Reset();
}

// Alias for EF_CleanupParticles: release all particle render elements.
void 
CRenderer::SafeReleaseParticleREs()
{
	EF_CleanupParticles();
}

// Report total allocated and used memory across all particle render lists.
// Outputs are written to nAlloc (reserved bytes) and nUse (active bytes).
void 
CRenderer::GetMemoryUsageParticleREs(size_t& nAlloc, size_t& nUse)
{
	stl::SMemoryUsage mem;
	// Cover the whole g_RenderListData array (sized RT_COMMAND_BUF_COUNT),
	// not just the first two entries.
	for (int i = 0; i < RT_COMMAND_BUF_COUNT; ++i)
		g_RenderListData[i].GetMemoryUsage(mem);
	nAlloc = mem.nAlloc;
	nUse = mem.nUsed;
}

