#pragma once

#include "CogObjective.h"

// ------------------------------------------------------------------------------------------

// Construct the evaluator: parameterized symbols are handed ids from
// OP_PARAMETER upward (see getId), and result caching starts disabled.
CogEvaluator::CogEvaluator()
: paraSymbolIdx(OP_PARAMETER),
  cacheMode(COGCM_OFF)
{
}

// Nothing to release explicitly; members clean themselves up.
CogEvaluator::~CogEvaluator()
{
}

// Resolve a textual label to an operator/terminal id, matching the prefix
// case-insensitively against every registered op label (scanned in reverse
// key order so higher-numbered entries win). Two match shapes are accepted:
//   * label identical to a registered op label -> returns that op's id;
//   * "<op-label> <spec>" -> the trailing spec is validated by
//     readParameter(); on success a new parameterized op is registered under
//     the next free paraSymbolIdx and that fresh id is returned.
// Returns false (id untouched) when nothing matches.
// NOTE(review): the iterator here types the map as map<size_t, CogOpInfo>
// while getLabel() uses map<uint32_t, CogOpInfo>; these only coexist where
// size_t is 32 bits — confirm the declared key type of validOps.
bool CogEvaluator::getId(const string& label, uint32_t& id)
{
	for (map<size_t, CogOpInfo>::reverse_iterator op_iter = validOps.rbegin(); op_iter != validOps.rend(); ++op_iter)
	{
		// A candidate can only match if the queried label is at least as long
		// as the registered one (equal length, or longer with a suffix).
		if (label.size() >= op_iter->second.label.size())
		{
			bool same = true;
			for (size_t j = 0; j < op_iter->second.label.size(); j++)
			{
				// character mismatch
				if (tolower(label[j]) != tolower(op_iter->second.label[j]))
				{
					same = false;
					break;
				}
			}
			if (same)
			{
				if (label.size() > op_iter->second.label.size())
				{
					// Longer than the op label: only valid as "<op> <spec>".
					if (label[op_iter->second.label.size()] == ' ')
					{
						string substring = label.substr(op_iter->second.label.size()+1);
						if (substring.size() && readParameter(op_iter->first, substring))
						{
							// Register the fully-specified label as a new
							// parameterized op, inheriting the base op's
							// precedence. (Safe during iteration: map insert
							// does not invalidate existing iterators, and we
							// return immediately.)
							validOps[paraSymbolIdx] = CogOpInfo(label, op_iter->second.precedence);
							id = paraSymbolIdx++;
							return true;
						}
						else
						{
							globalMessageLog.print(COGMT_ERROR_IO, VERBOSITY_NONE, "Terminal specification \"%s\" is not valid for known terminal type \"%s\"", substring.c_str(), label.substr(0, op_iter->second.label.size()).c_str());
						}
					}
				}
				else
				{
					// Exact-length match: an already-registered op.
					id = op_iter->first;
					return true;
				}		
			}
		}
	}
	return false;
}

// Look up the textual label registered for op id `id`.
// Returns true and fills `label` on success; false (label untouched) when the
// id is unknown. Uses the map's keyed find() — O(log n) — instead of the
// previous linear scan over every entry.
bool CogEvaluator::getLabel(uint32_t id, string& label)
{
	map<uint32_t, CogOpInfo>::iterator VO_iter = validOps.find(id);
	if (VO_iter == validOps.end())
	{
		return false;
	}
	label = VO_iter->second.label;
	return true;
}

// ------------------------------------------------------------------------------------------

// Nothing to construct here; evaluation state (targets, cacheMode, ...) is
// configured after construction.
template <class TD, class PD>
CogSampledEvaluator<TD, PD>::CogSampledEvaluator()
{
}

// Nothing to release here; per-production cache storage is torn down via
// destroyCache(), not by the evaluator itself.
template <class TD, class PD>
CogSampledEvaluator<TD, PD>::~CogSampledEvaluator()
{
}

// Claim this evaluator's slice of the per-production cache blob, starting at
// `offset`, and return the offset of the first byte past the slice.
template <class TD, class PD> 
size_t CogSampledEvaluator<TD, PD>::setCacheOffset(size_t offset) 
{ 
	if (cacheMode == COGCM_VALUE)
	{
		// Slice layout: one validity flag plus one cached value per case.
		cacheOffset = offset;
		return cacheOffset + sizeof(bool) + targets.size() * sizeof(TD);
	}
	if (cacheMode == COGCM_STACK)
	{
		// Slice layout: a guard-mutex pointer plus a lazily-built stack pointer.
		cacheOffset = offset;
		return cacheOffset + sizeof(boost::mutex*) + sizeof(PD*);
	}
	// Caching disabled: consume no cache space.
	return offset;
}

// Initialize this evaluator's cache storage inside a production's cache blob.
// COGCM_VALUE: clear the "values computed" flag (values are filled lazily by
// computeData()). COGCM_STACK: allocate the guard mutex and null the
// lazily-built stack pointer (both released in destroyCache()).
// NOTE(review): this indexes from pCache directly, while computeData() and
// collectData() index from pRule->pCache + cacheOffset — presumably the
// caller passes the already-offset slice pointer here; confirm against the
// (unseen) call site.
template <class TD, class PD> 
void CogSampledEvaluator<TD, PD>::constructCache(char* pCache) 
{ 
	switch (cacheMode)
	{
	case COGCM_VALUE:
		// Mark the per-case value table as not yet computed.
		pCache[0] = false; 
		break;
	case COGCM_STACK:
		*(reinterpret_cast<boost::mutex**>(pCache)) = new boost::mutex();
		*(reinterpret_cast<PD**>(pCache+sizeof(boost::mutex*))) = NULL;
		break;
	default:
		break;
	}
}

// Release the heap objects owned by this evaluator's cache slice.
// COGCM_VALUE needs no teardown: its flag and values live inline in the blob.
// Deleting a NULL stack pointer (never built) is harmless.
// NOTE(review): like constructCache(), this indexes from pCache without
// adding cacheOffset — confirm the caller passes the slice base pointer.
template <class TD, class PD> 
void CogSampledEvaluator<TD, PD>::destroyCache(char* pCache)
{
	switch (cacheMode)
	{
	case COGCM_STACK:
		delete *(reinterpret_cast<boost::mutex**>(pCache));
		delete *(reinterpret_cast<PD**>(pCache+sizeof(boost::mutex*)));
		break;
	default:
		break;
	}
}

// Score `pRule` against every target case and return the mean error.
// Dispatches on cacheMode: per-case value caching, shared-stack caching, or
// a plain one-shot expansion. Returns exactly 0 when the mean error is below
// FLT_EPSILON (treats numeric dust as a perfect score).
template <class TD, class PD>
double CogSampledEvaluator<TD, PD>::evaluate(CogProduction* pRule)
{
	// Guard: with no target cases there is nothing to score. This also avoids
	// the 0/0 division below, which previously produced NaN.
	if (targets.empty())
	{
		return 0;
	}
	double totalError = 0;
	reset();
	switch (cacheMode)
	{
		case COGCM_VALUE:
			// Value caching: computeData() yields a per-case value table;
			// index it by the current case cursor.
			for (bool more = first(); more; more = next())
			{
				totalError += error(*(computeData(pRule)+*cCaseIdx));
			}
			break;
		case COGCM_STACK:
			// Stack caching: reuse the production's lazily-built RPN stack.
			for (bool more = first(); more; more = next())
			{
				totalError += error(process(*collectData(pRule)));
			}
			break;
		default:
			{
				// No caching: expand the whole production once up front, then
				// process it per case.
				PD rootStack;
				rootExpand(pRule, rootStack);
				for (bool more = first(); more; more = next())
				{
					totalError += error(process(rootStack));
				}
			}
			break;
	}
	// Mean error over all cases; clamp tiny values to an exact zero.
	totalError /= targets.size();
	return (totalError < FLT_EPSILON) ? 0 : totalError;
}

// Return the production's per-case value table (COGCM_VALUE mode), computing
// and caching it on first use.
// Cache slice layout: [bool computed][one TD value per target case], located
// at pRule->pCache + cacheOffset. On a miss the production is expanded once
// and processed for every case; the shared case cursor is saved and restored
// around that internal first()/next() sweep so the caller's iteration is
// undisturbed.
// NOTE(review): unlike the COGCM_STACK path, this check-then-fill sequence is
// not mutex-guarded — confirm value-cached evaluation is single-threaded.
template <class TD, class PD>
TD* CogSampledEvaluator<TD, PD>::computeData(CogProduction* pRule)
{
	if (!*(pRule->pCache+cacheOffset))
	{
		PD computeStack;
		computeExpand(pRule, computeStack);
		size_t priorIdx = *cCaseIdx;
		for (bool more = first(); more; more = next())
		{	
			*(reinterpret_cast<TD*>(pRule->pCache+cacheOffset+sizeof(bool)+*cCaseIdx*sizeof(TD))) = process(computeStack, *cCaseIdx);
		}
		*(pRule->pCache+cacheOffset) = true;
		*cCaseIdx = priorIdx;
	}
	// Pointer to the first cached value (the flag byte is skipped).
	return reinterpret_cast<TD*>(pRule->pCache+cacheOffset+sizeof(bool));
	//return *(reinterpret_cast<TD*>(pRule->pCache+cacheOffset+sizeof(bool)+*cCaseIdx*sizeof(TD)));
}

// Flatten the production's right-hand side into `data` (COGCM_VALUE mode),
// substituting evaluable sub-productions with pointers into their cached
// per-case value tables.
template <class TD, class PD>
inline
void CogSampledEvaluator<TD, PD>::computeExpand(CogProduction* pRule, PD& data)
{
	for (size_t i = 0; i < pRule->rhs.size(); ++i)
	{
		CogSymbol& sym = pRule->rhs[i];
		if (sym.flag & COGSF_TERMINAL)
		{
			// Terminal symbol: feed it straight to the stack builder.
			pushOperator(sym.id, data);
		}
		else if (sym.flag & COGSF_EVAL)
		{
			// Evaluable sub-production: reference its cached value table.
			pushValuePtr(computeData(sym.pRule), data);
		}
		else
		{
			// Structural non-terminal: expand recursively in place.
			computeExpand(sym.pRule, data);
		}
	}
}

// Return the production's shared RPN stack (COGCM_STACK mode), building it on
// first use. The build is guarded by the per-production mutex allocated in
// constructCache(); the returned stack is owned by the cache and released in
// destroyCache().
// Cache slice layout: [boost::mutex*][PD*] at pRule->pCache + cacheOffset.
template <class TD, class PD>
inline
PD* CogSampledEvaluator<TD, PD>::collectData(CogProduction* pRule)
{
	PD** ppStack = reinterpret_cast<PD**>(pRule->pCache+cacheOffset+sizeof(boost::mutex*));
	// RAII lock instead of manual lock()/unlock(): the mutex is now released
	// even if collectExpand() or the allocation throws.
	boost::mutex::scoped_lock guard(**reinterpret_cast<boost::mutex**>(pRule->pCache+cacheOffset));
	if (*ppStack == NULL)
	{
		// First caller to get here builds the shared stack.
		PD* pData = new PD();
		collectExpand(pRule, *pData);
		*ppStack = pData;
	}
	return *ppStack;
}

// Flatten the production's right-hand side into `data` (COGCM_STACK mode),
// splicing in the cached stacks of evaluable sub-productions.
template <class TD, class PD>
inline
void CogSampledEvaluator<TD, PD>::collectExpand(CogProduction* pRule, PD& data)
{
	for (size_t i = 0; i < pRule->rhs.size(); ++i)
	{
		CogSymbol& sym = pRule->rhs[i];
		if (sym.flag & COGSF_TERMINAL)
		{
			// Terminal symbol: feed it straight to the stack builder.
			pushOperator(sym.id, data);
		}
		else if (sym.flag & COGSF_EVAL)
		{
			// Evaluable sub-production: splice its (lazily built) cached stack.
			merge(data, *collectData(sym.pRule));
		}
		else
		{
			// Structural non-terminal: expand recursively in place.
			collectExpand(sym.pRule, data);
		}
	}
}

// Uncached expansion: flatten the entire production tree rooted at `pRule`
// into `data`.
template <class TD, class PD>
inline
void CogSampledEvaluator<TD, PD>::rootExpand(CogProduction* pRule, PD& data)
{
	for (size_t i = 0; i < pRule->rhs.size(); ++i)
	{
		CogSymbol& sym = pRule->rhs[i];
		if (sym.flag & COGSF_TERMINAL)
		{
			// Terminal symbol: feed it straight to the stack builder.
			pushOperator(sym.id, data);
		}
		else if (sym.flag & COGSF_EVAL)
		{
			// Evaluable boundary: linearize the subtree on its own stack and
			// then splice it in, mirroring what the cached paths do.
			PD subStack;
			rootExpand(sym.pRule, subStack);
			merge(data, subStack);
		}
		else
		{
			// Structural non-terminal: expand recursively in place.
			rootExpand(sym.pRule, data);
		}
	}
}

// Begin a fresh evaluation pass: install a new case cursor set to case 0.
// (cCaseIdx is a pointer wrapper; reset() swaps in the fresh counter.)
template <class TD, class PD>
inline
void CogSampledEvaluator<TD, PD>::reset()
{
	cCaseIdx.reset(new size_t(0));
}

// Expected output value for the case the shared cursor currently points at.
template <class TD, class PD>
inline
TD CogSampledEvaluator<TD, PD>::target()
{
	const size_t caseIdx = *cCaseIdx;
	return targets[caseIdx];
}

// Rewind the shared case cursor to case 0 and report whether any case exists.
// (Idiomatic !empty() replaces the previous `size() ? true : false`.)
template <class TD, class PD>
inline
bool CogSampledEvaluator<TD, PD>::first()
{
	(*cCaseIdx) = 0;
	return !targets.empty();
}

// Advance the shared case cursor; returns false (cursor unchanged) once the
// last case has been consumed.
template <class TD, class PD>
inline
bool CogSampledEvaluator<TD, PD>::next()
{
	const size_t upcoming = *cCaseIdx + 1;
	if (upcoming >= targets.size())
	{
		return false;
	}
	*cCaseIdx = upcoming;
	return true;
}

// ------------------------------------------------------------------------------------------

// Register the grouping pseudo-operators. They are handled by dedicated cases
// in pushOperator(), so their precedence value (0) is never consulted.
// (operator[] assignment deliberately overwrites any pre-existing entries.)
template <class TD>
CogStackProcessor<TD>::CogStackProcessor()
{
	validOps[OP_PIN] = CogOpInfo("(", 0);
	validOps[OP_POUT] = CogOpInfo(")", 0);
}

// Shunting-yard step: feed one symbol id into the translation state held in
// `data`. Operands append straight to the RPN output queue; operators wait on
// symbolStack until a lower/equal-precedence operator or a parenthesis
// boundary flushes them.
template <class TD>
inline
void CogStackProcessor<TD>::pushOperator(size_t id, CogStackData<TD>& data)
{
	switch (id)
	{
	case OP_PIN:
		// Open parenthesis: acts as a barrier on the operator stack.
		data.symbolStack.push(OP_PIN);
		break;
	case OP_POUT:
		// Close parenthesis: flush operators back to the matching OP_PIN.
		while (!data.symbolStack.empty() && (data.symbolStack.top() != OP_PIN))
		{
			data.rpnQueue.push_back(data.symbolStack.top());
			data.symbolStack.pop();
		}
		// Discard the OP_PIN itself (if the input was balanced).
		if (!data.symbolStack.empty())
		{
			data.symbolStack.pop();
		}
		break;
	default:
		// NOTE(review): map::operator[] default-constructs an entry for an
		// unknown id — confirm callers only pass registered ids.
		if (!validOps[id].precedence) // operand
		{
			data.rpnQueue.push_back(id);
		}
		else
		{
			// Operator: pop strictly-higher-precedence operators first.
			// NOTE(review): the strict '<' leaves equal-precedence operators
			// stacked, i.e. right-associative grouping — confirm intended.
			while (!data.symbolStack.empty() && (validOps[id].precedence < validOps[data.symbolStack.top()].precedence))
			{
				data.rpnQueue.push_back(data.symbolStack.top());
				data.symbolStack.pop();
			}
			data.symbolStack.push(id);
		}
	}
}

// Emit an operand slot in the RPN stream. The OP_VALUE marker tells process()
// to pull the next pointer from valuePtrQueue (consumed in push order) when
// the stream is executed.
template <class TD>
inline
void CogStackProcessor<TD>::pushValuePtr(TD* pValue, CogStackData<TD>& data)
{
	data.valuePtrQueue.push_back(pValue);
	data.rpnQueue.push_back(OP_VALUE);
}

// Execute the RPN program held in `data` and return the resulting value.
// `valC` selects which element of each cached value table to read (used by
// value-caching evaluators storing one value per test case).
// NOTE(review): a default argument on the out-of-class definition of a class
// template member is non-conforming (ISO C++ [dcl.fct.default]); it belongs
// on the in-class declaration — confirm before moving/removing it here.
template <class TD>
TD CogStackProcessor<TD>::process(CogStackData<TD>& data, size_t valC = 0)
{
	stack<TD> resultStack;
	// Flush any operators still staged on the symbol stack into the output
	// queue; afterwards the RPN queue is the complete program and the stack is
	// empty, so repeated calls on the same data are safe.
	while (!data.symbolStack.empty())
	{
		data.rpnQueue.push_back(data.symbolStack.top());
		data.symbolStack.pop();
	}
	// Fixed: `typename` is required here — list<TD*> is a dependent type, so
	// its ::iterator is a dependent name (the bare form is ill-formed on
	// conforming compilers; only old permissive MSVC accepted it).
	typename list<TD*>::iterator vq_iter = data.valuePtrQueue.begin();
	list<size_t>::iterator rpn_iter = data.rpnQueue.begin();
	while (rpn_iter != data.rpnQueue.end())
	{
		if (*rpn_iter == OP_VALUE) // operand/value
		{
			// Consume the next value-table pointer; index its valC-th entry.
			resultStack.push(*(*vq_iter+valC));
			++vq_iter;
		}
		else // operator
		{
			resultStack.push(processOperator(*rpn_iter, resultStack));
		}
		++rpn_iter;
	}
	// NOTE(review): top() on an empty stack is UB if the RPN queue was empty —
	// callers are assumed to always supply at least one operand.
	return resultStack.top();
}

// Append `extension`'s RPN program (and its value pointers) to `original`.
// The extension's symbol stack is drained into its own queue first so the
// spliced program is fully linearized; this mutates `extension`.
template <class TD>
void CogStackProcessor<TD>::merge(CogStackData<TD>& original, CogStackData<TD>& extension)
{
	for (; !extension.symbolStack.empty(); extension.symbolStack.pop())
	{
		extension.rpnQueue.push_back(extension.symbolStack.top());
	}
	original.rpnQueue.insert(original.rpnQueue.end(), extension.rpnQueue.begin(), extension.rpnQueue.end());
	original.valuePtrQueue.insert(original.valuePtrQueue.end(), extension.valuePtrQueue.begin(), extension.valuePtrQueue.end());
}