//## begin module%3C6E5F570203.cm preserve=no
//	  %X% %Q% %Z% %W%
//## end module%3C6E5F570203.cm

//## begin module%3C6E5F570203.cp preserve=no
//	+----------------------------------------------+
//	| SVMBR - A program for training SVMs          |
//	+----------------------------------------------+
//	| * Creator, modeling, coding:                 |
//	|    Marcelo Barros de Almeida                 |
//	|    barros@smar.com.br                        |
//	|    http://litc.cpdee.ufmg.br/~barros/        |
//	| * Coding, improvements, bug fixes:           |
//	|    Bernardo Penna                            |
//	|    bprc@brfree.com.br                        |
//	+----------------------------------------------+
//
//	 Copyright(c) 2002 by Marcelo Barros de Almeida
//	                            All rights reserved
//
//	-[CVS]------------------------------------------
//	$Author$
//	$Date$
//	$Source$
//	$Name$
//	$Revision$
//	------------------------------------------------
//## end module%3C6E5F570203.cp

//## Module: edrsolver%3C6E5F570203; Package body
//## Subsystem: <Top Level>
//## Source file: D:\textotese\svmbr\src\edrsolver.cpp

//## begin module%3C6E5F570203.additionalIncludes preserve=no
//## end module%3C6E5F570203.additionalIncludes

//## begin module%3C6E5F570203.includes preserve=yes
//## end module%3C6E5F570203.includes

// edrsolver
#include "edrsolver.h"
//## begin module%3C6E5F570203.declarations preserve=no
//## end module%3C6E5F570203.declarations

//## begin module%3C6E5F570203.additionalDeclarations preserve=yes
//## end module%3C6E5F570203.additionalDeclarations


// Class EDRSolver 








EDRSolver::EDRSolver (REAL cv, REAL tolv, REAL epsilonv, INT numEDRv, REAL EDRPower, SVM *svmp, Kernel *kernelp, Data *datap, INT chunksizev)
  //## begin EDRSolver::EDRSolver%998688402.hasinit preserve=no
  //## end EDRSolver::EDRSolver%998688402.hasinit
  //## begin EDRSolver::EDRSolver%998688402.initialization preserve=yes
  //## end EDRSolver::EDRSolver%998688402.initialization
{
  //## begin EDRSolver::EDRSolver%998688402.body preserve=yes
	// Forwards every construction parameter to init(), which performs the
	// base Solver setup and allocates the EDR repetition vector (numRpt).
	init(cv,tolv,epsilonv,numEDRv,EDRPower,svmp,kernelp,datap,chunksizev);
  //## end EDRSolver::EDRSolver%998688402.body
}


EDRSolver::~EDRSolver()
{
  //## begin EDRSolver::~EDRSolver%.body preserve=yes
	// Releases the base-class resources plus this class's numRpt buffer.
	freeSpace();
  //## end EDRSolver::~EDRSolver%.body
}



//## Other Operations (implementation)
INT EDRSolver::run (INT maxi)
{
  //## begin EDRSolver::run%998688367.body preserve=yes
	/*
	 * Runs the EDR (Error Dependent Repetition) training loop over the
	 * current chunk until a full pass changes no alpha.
	 * 'maxi' is currently unused: the iteration-cap check below is
	 * commented out.
	 * Note: tolerance must be bigger than epsilon for EDR.
	 * ---------------------------------------------
	 */
  INT i,j, k, idxma, idxmb;

  // BUG FIX: idxma/idxmb were previously uninitialized. They are written
  // only when an example of the matching class exists in the chunk, yet
  // numRpt[idxma]++/numRpt[idxmb]++ below reads them unconditionally --
  // undefined behavior for a one-class chunk. Default both to 0.
  idxma = 0;
  idxmb = 0;

  // initialize random seed
  SRAND(time(NULL));

  // start to count time
  crono.startCronometer();

  // main loop
	INT numExamples = chunkSize /*data->getnumExamples()*/;
  INT numChanged = numExamples;
	REAL ra, rb;

  examineAll = TRUE;

	// every chunk slot starts with one scheduled presentation, and the
	// current alpha is saved so run_chunck() can later compute the deltas
	for(i = 0; i < numExamples; i++){
		numRpt[i] = 1;
		oldAlphaSubset[i] = alpha[subset[i]];
	}

  cout << "Starting training (EDR)." << endl << flush;

  while((numChanged > 0) /*&& (numIter < numIterMax)*/) {

    numChanged = 0;
    maxErrorA = -10000000; minErrorA = 10000000;
    maxErrorB = -10000000; minErrorB = 10000000;

		// empty EDR queue:
		// the same vector cannot be submitted in sequence; other vectors
		// must be presented in between. Starting at a random offset avoids
		// a fixed presentation order.
		INT end = 0;
		while(!end){
			end =  1;
			for(i = RAND(numExamples), j = 0 ; j < numExamples; j++, i++){
				k = i % numExamples;
				if(numRpt[k]-- > 0) {
					numChanged += examineExample(k);
					end = 0;
				}
			}
		}

		// detecting max and min errors, per class (A: target > 0, B: otherwise)
		// NOTE(review): the min is only tested in the else-branch, so an
		// example that raises the max is never a min candidate -- looks like
		// the original author's intent, but worth confirming.
		for(i = 0 ; i < numExamples ; i++){

			numRpt[i] = 1;

			if(data->gettarget(subset[i]) > 0) {
				if(errorCache[subset[i]] > maxErrorA){
					maxErrorA = errorCache[subset[i]];
					idxma = i;
				}
				else
					if(errorCache[subset[i]] < minErrorA)	
						minErrorA = errorCache[subset[i]];
			}
			else {
				if(errorCache[subset[i]] > maxErrorB){
					maxErrorB = errorCache[subset[i]];
					idxmb = i;
				}
				else
					if(errorCache[subset[i]] < minErrorB)	
						minErrorB = errorCache[subset[i]];
			}
		}

		// progress
		cout << "IT=" << ++numIter << " CH=" << numChanged << endl << flush;
		if(numChanged == 0) break;	

		// the worst (max-error) vector of each class gets one extra repetition
		numRpt[idxma]++;
		numRpt[idxmb]++;

		// creating repetition vector: examples above each graded error
		// threshold accumulate extra presentations
    for(j = numEDR - 1; j > 0 ; j--){
			// EDR threshold functions for each class
			ra = pow(j*(maxErrorA - minErrorA)/numEDR,1/power) + minErrorA;
			rb = pow(j*(maxErrorB - minErrorB)/numEDR,1/power) + minErrorB;

	    for(i = 0; i < numExamples ; i++){
				if(data->gettarget(subset[i])>0){
					if(errorCache[subset[i]] > ra) numRpt[i]++;
				}
				else {
					if(errorCache[subset[i]] > rb ) numRpt[i]++;
				}
			}
    }

	}

	//calcBias();
	bias = - bias;

  // finish time counting
  crono.stopCronometer();

  //calcNormW();
  return TRUE;

  //## end EDRSolver::run%998688367.body
}

INT EDRSolver::run_chunck (INT maxi)
{
  //## begin EDRSolver::run_chunck%998688368.body preserve=yes

	// Chunked EDR training: repeatedly selects a working subset ("subset")
	// of at most chunkSize examples, trains it with run(), and propagates
	// the resulting alpha changes to the error cache of the remaining
	// examples ("supset"). 'maxi' is forwarded to run().

	INT numA = 0, numB = 0;
	INT j, l , k, i;
	PQueue* pqA;
	PQueue* pqB;

	// initing subset: first vectors with 50% of each class
	// data should be randomized
	INT numAMax = data->getnumClassA();
	INT numBMax = data->getnumClassB();
	INT ne      = data->getnumExamples();

	if(numAMax > (chunkSize/2))
		numAMax = chunkSize/2;

	if(numBMax > (chunkSize/2))
		numBMax = chunkSize/2;


	// partition: up to numAMax class-A and numBMax class-B examples go into
	// subset (interleaved by scan order); everything else goes into supset
	for(i = 0, l = 0, k = 0; i < ne ; i++){
		if((data->gettarget(i) == 1) && (numA < numAMax)){
			subset[k++] = i;
			numA++;
		}
		else 
			if((data->gettarget(i) == -1) && (numB < numBMax)){
				subset[k++] = i;
				numB++;
			}
			else
				supset[l++] = i;
	}


	// train the initial chunk
	cache->SetChunk(subset);
	run(maxi);

	// update alpha and bias for remaining set
	// (errorCache of each out-of-chunk example l receives the contribution
	// of every in-chunk alpha that moved by more than tol; the +1 on the
	// kernel value accounts for the bias term)
	
  for(i = 0; i < chunkSize ; i++) {
    k = subset[i];
		REAL dalpha = oldAlphaSubset[i] - alpha[k];
		REAL target = data->gettarget(k);
		if(ABS(dalpha)>tol){	
			for(j = 0 ; j < (ne - chunkSize) ; j++){
				l = supset[j];
				errorCache[l] = errorCache[l] + 
												data->gettarget(l)*target*dalpha*(cache->evalKernel(k,l) + 1) ;
				}
		}
	}
  /*
	REAL cte = 0;
  for(i = 0; i < chunkSize ; i++) {
    k = subset[i];
		oldAlphaSubset[i] = (oldAlphaSubset[i] - alpha[k])*data->gettarget(k);
		cte += oldAlphaSubset[i];
	}

	for(j = 0 ; j < (ne - chunkSize) ; j++){
		l = supset[j];
		INT targ = data->gettarget(l);
		for(i = 0 ; i < chunkSize ; i++)
			errorCache[l] += targ*cache->evalKernel(subset[i],l)*oldAlphaSubset[i];
		errorCache[l] += targ*cte;
	}
  */
	// priority queues used to rank examples of each class by cached error
	pqA = new PQueue(data->getnumClassA());
	pqB = new PQueue(data->getnumClassB());

	// main loop 
	while(TRUE) {


		// build new subset: rank every example of each class by its error
		for(i = 0 ; i < ne ; i++){
			if(data->gettarget(i) == 1){
				//if(IS_SV(alpha[i]))
				//	pqA->add(errorCache[i]*10000,i);
				//else
					pqA->add(errorCache[i],i);
			}
			else{
				//if(IS_SV(alpha[i]))
				//	pqB->add(errorCache[i]*10000,i);
				//else
					pqB->add(errorCache[i],i);
			}
		}

		k = MIN(numAMax,numBMax);

		// take the top-ranked examples, alternating classes
		for(i = 0, j = -1 ; i < k ; i++){
			subset[++j] = pqA->remove(0);
			subset[++j] = pqB->remove(0);
		}

		// if the class quotas differ, fill the remainder from the larger class
		if(numAMax != numBMax) {
			if(numAMax > numBMax) {
				for(i = 0 ; i < (numAMax - numBMax) ; i++, j++)
					subset[j] = pqA->remove(0);
			}
			else {
				for(i = 0 ; i < (numBMax - numAMax) ; i++, j++)
					subset[j] = pqB->remove(0);
			}
		}

		// whatever is left in the queues becomes the new remaining set
		j = 0;
		while(pqA->getnumElements())
			supset[j++] = pqA->remove(0);

		while(pqB->getnumElements())
			supset[j++] = pqB->remove(0);

		// training
		cache->SetChunk(subset);
		INT oldNumIter = numIter;
		run(maxi);

		// test convergence: run() finishing in a single iteration means no
		// alpha changed, i.e. the chunk was already optimal
  	if((numIter - oldNumIter) == 1) break;

		// update alpha and bias: same error-cache correction as above for the
		// out-of-chunk examples
	  for(i = 0; i < chunkSize ; i++) {
		  k = subset[i];
			REAL dalpha = oldAlphaSubset[i] - alpha[k];
			REAL target = data->gettarget(k);
			if(ABS(dalpha)>tol){	
				for(j = 0 ; j < (ne - chunkSize) ; j++){
					l = supset[j];
					errorCache[l] = errorCache[l] + 
													data->gettarget(l)*target*dalpha*(cache->evalKernel(l,k) + 1) ;
					}
			}
		}
	  /*

		cte = 0;
		for(i = 0; i < chunkSize ; i++) {
	    k = subset[i];
			oldAlphaSubset[i] = (oldAlphaSubset[i] - alpha[k])*data->gettarget(k);
			cte += oldAlphaSubset[i];
		}

		for(j = 0 ; j < (ne - chunkSize) ; j++){
			l = supset[j];
			INT targ = data->gettarget(l);
			for(i = 0 ; i < chunkSize ; i++)
				errorCache[l] += targ*cache->evalKernel(subset[i],l)*oldAlphaSubset[i];
			errorCache[l] += targ*cte;
		}
		*/
	}

	delete pqA;
	delete pqB;

	return TRUE;

  //## end EDRSolver::run_chunck%998688368.body
}

INT EDRSolver::examineExample (UINT i)
{
  //## begin EDRSolver::examineExample%998688389.body preserve=yes
	// Updates the alpha of chunk slot i: computes the unconstrained step
	// from the cached error, clips it to [0, C], and -- when the resulting
	// change exceeds tol -- propagates the delta into the error cache of
	// every chunk example and into the bias. Returns 1 if alpha changed.
	const INT idx    = subset[i];
	const INT target = data->gettarget(idx);

	// unconstrained update; the +1 on the kernel diagonal accounts for bias
	REAL step     = errorCache[idx]/(evalKernel(idx,idx) + 1);
	REAL newAlpha = alpha[idx] + step;

	// clip to the box constraints, snapping within epsilon of the bounds
	if(newAlpha > (C-epsilon))
		newAlpha = C;
	else if(newAlpha < epsilon)
		newAlpha = 0;

	// delta after clipping: saves one multiplication inside the loop below
	const REAL delta = alpha[idx] - newAlpha;
	alpha[idx] = newAlpha;

	INT changed = 0;
	if(ABS(delta) > tol){
		REAL *row = cache->getKernelRow(idx);
		for(INT j = 0 ; j < chunkSize ; j++){
			const INT sj = subset[j];
			errorCache[sj] += data->gettarget(sj)*target*delta*(row[j] + 1);
		}
		bias -= delta*target;
		changed = 1;
	}

	return changed;

  //## end EDRSolver::examineExample%998688389.body
}

INT EDRSolver::run ()
{
  //## begin EDRSolver::run%998880779.body preserve=yes

	// Convenience overload: runs without an iteration limit (-1 sentinel).
	return run(-1);

  //## end EDRSolver::run%998880779.body
}

INT EDRSolver::run_chunck ()
{
  //## begin EDRSolver::run_chunck%998880780.body preserve=yes

	// Convenience overload: chunked training without an iteration limit.
	return run_chunck(-1);

  //## end EDRSolver::run_chunck%998880780.body
}

VOID EDRSolver::init (REAL cv, REAL tolv, REAL epsilonv, INT numEDRv, REAL EDRPower, SVM *svmp, Kernel *kernelp, Data *datap, INT chunksizev)
{
  //## begin EDRSolver::init%999602350.body preserve=yes

	// Base-class setup: C, tolerance, epsilon, SVM/kernel/data pointers and
	// chunk size (also allocates the buffers the base class owns).
	Solver::init(cv,tolv,epsilonv,svmp,kernelp,datap,chunksizev);

	numEDR = numEDRv;    // number of EDR repetition levels
	power  = EDRPower;   // exponent of the EDR threshold function

	try{
		// one repetition counter per chunk slot
		numRpt = new INT [chunkSize/*data->getnumExamples()*/];
	} 
	catch(...){
		cerr << "Error when allocating memory (EDR::init)." << endl;
		exit(1);
	}

	// fixing initial error vector for EDR: err_i = -(f(x_i)-y_i)*y_i
	// (with all alphas zero f(x_i)=0, and targets are +/-1, so err_i = 1)
	for(INT i = 0; i < data->getnumExamples(); i++){
		errorCache[i] = 1; 
  }

  //## end EDRSolver::init%999602350.body
}

VOID EDRSolver::calcBias ()
{
  //## begin EDRSolver::calcBias%999602351.body preserve=yes
	// Stub: EDR maintains the bias incrementally in examineExample(), so
	// this explicit recomputation is not implemented (see the commented-out
	// call in run()). Only logs a warning.
	cerr << "Not implemented (EDRSolver::calcBias)" << endl;
  //## end EDRSolver::calcBias%999602351.body
}

VOID EDRSolver::freeSpace ()
{
  //## begin EDRSolver::freeSpace%999602352.body preserve=yes

	// Releases base-class resources, then this class's own buffer.
	Solver::freeSpace();
	// BUG FIX: numRpt is allocated with new INT[...] in init(), so it must
	// be released with delete[] -- scalar delete on an array new is
	// undefined behavior.
	delete [] numRpt;

  //## end EDRSolver::freeSpace%999602352.body
}

VOID EDRSolver::sincSets ()
{
  //## begin EDRSolver::sincSets%1000125182.body preserve=yes
	// Stub: set synchronization is handled inline by run_chunck(); this
	// hook only logs a warning.
	cerr << "Not implemented (EDRSolver::sincSets)" << endl;
  //## end EDRSolver::sincSets%1000125182.body
}

VOID EDRSolver::genWorkingSet ()
{
  //## begin EDRSolver::genWorkingSet%1000125183.body preserve=yes
	// Stub: working-set generation is handled inline by run_chunck() via
	// the priority queues; this hook only logs a warning.
	cerr << "Not implemented (EDRSolver::genWorkingSet)" << endl;
  //## end EDRSolver::genWorkingSet%1000125183.body
}

VOID EDRSolver::updateWorkingSet ()
{
  //## begin EDRSolver::updateWorkingSet%1000125184.body preserve=yes
	// Stub: working-set updates are handled inline by run_chunck(); this
	// hook only logs a warning.
	cerr << "Not implemented (EDRSolver::updateWorkingSet)" << endl;
  //## end EDRSolver::updateWorkingSet%1000125184.body
}

// Additional Declarations
  //## begin EDRSolver%3B87FD980077.declarations preserve=yes
  //## end EDRSolver%3B87FD980077.declarations

//## begin module%3C6E5F570203.epilog preserve=yes
//## end module%3C6E5F570203.epilog
