/****************************************************************************
*
*					Functions of class ImprovedGreedyAlgorithm
*
****************************************************************************/

#include <string>
#include <utility>
#include <iostream>
#include <stdlib.h>
#include "SimpleFeature.h"
#include "ImprovedGreedyAlgorithm.h"

// calling resize for at least this number of elements
#define minVecResize		16
#define	eps					0.000001


/****************************************************************************
*					ImprovedGreedyAlgorithm::ImprovedGreedyAlgorithm
*
*	Description:	Standard constructor with initialization of internal parameters
*	Parameters:		-
*	Returns:		-
*
****************************************************************************/
ImprovedGreedyAlgorithm::ImprovedGreedyAlgorithm (void) {
	// Default configuration; every value below may be overridden later via setParameters().
	thresholdForIndividualFeats = 0.150;		// EP threshold for an individual feature to be considered
	thresholdForArbitraryFeats = 0.075;			// EP threshold for an arbitrary feature to be used in the algorithm
	minNumOfIndivFeatsPerVariable = 2;			// minimal number of individual features kept per variable
	maxSizeOfArbitraryFeats = 4;				// maximal size of an arbitrary feature used in the algorithm
	maxNumOfFeatsHighEQAdded = 5;				// max features with high EQ / low EP added per greedy step
	maxWholeNumOfFeats = (unsigned int) -1;		// cap on the total number of features in the pgm (default: unlimited)
	minLikelihoodGainHighEP = 1.0014;			// min acceptable likelihood gain to keep adding high-EP features
	minLikelihoodGainHighEQ = 1.0003;			// min acceptable likelihood gain to keep adding high-EQ features
	numIterWithSameValue = (unsigned int) -1;	// max iterations without objective change (default: unlimited)
	// Helper objects are constructed later (in setParameters) from factory names.
	inference = NULL;
	parameterLearning = NULL;
	// Used by combineFeatures; must be assigned before that function is called.
	threshold = 0;
}


/****************************************************************************
*					ImprovedGreedyAlgorithm::~ImprovedGreedyAlgorithm
*
*	Description:	Standard destructor with removing all dynamic internal variables
*	Parameters:		-
*	Returns:		-
*
****************************************************************************/
ImprovedGreedyAlgorithm::~ImprovedGreedyAlgorithm (void) {
	// Release the helper objects created in setParameters.
	// 'delete' on a null pointer is a well-defined no-op, so the previous
	// explicit NULL checks were redundant and have been removed.
	delete inference;
	delete parameterLearning;
}


/****************************************************************************
*					ImprovedGreedyAlgorithm::learn
*
*	Description:	Main Learning Procedure
*	Parameters:		-
*	Returns:		-
*
****************************************************************************/
int ImprovedGreedyAlgorithm::learn (DataSet& dataSetLearn, PGMStruct& pgmStruct) {
	// Greedy structure-learning procedure:
	//  1) build candidate features with high empirical probability (EP) from the data;
	//  2) repeatedly add the candidate with the best likelihood gain to the PGM;
	//  3) after each addition, also add up to maxNumOfFeatsHighEQAdded features with high
	//     expected value (EQ) constructed around the feature just added;
	//  4) re-learn the weights exactly and test the stop criteria.
	// Constructing a set of features with high empirical probability (each feature is described as a pair of variableID and variableValue)
	std::vector <std::vector < std::pair<unsigned int, unsigned int> > > featsWithHighEP, featsWithHighEQ, featsInPGM;
	unsigned int curLastFeat;
	std::vector <unsigned int> featsSizes;
	getFeatsHighEP (featsWithHighEP, dataSetLearn);
	// Calculating empirical probabilities of the constructed features:
	// AND together the per-sample bit masks of all individual features, then count the set bits
	std::vector <unsigned __int32> cvec;
	std::vector <float> epValuesHighEP;
	epValuesHighEP.resize(featsWithHighEP.size());
	for (unsigned int t = 0; t < featsWithHighEP.size(); t++) {
		cvec = dataSetLearn.dataPerFeature[featsWithHighEP[t][0].first][featsWithHighEP[t][0].second];
		for (unsigned int r = 1; r < featsWithHighEP[t].size(); r++) {
			for (unsigned int s = 0; s < cvec.size(); s++) {
				cvec[s] &= dataSetLearn.dataPerFeature[featsWithHighEP[t][r].first][featsWithHighEP[t][r].second][s];
			}
		}
		epValuesHighEP[t] = ((float) (DataSet::bitSum(cvec))) / ((float) dataSetLearn.numOfSamples);
	}
	
	// Main cycle of the learning algorithm
	featsInPGM.clear();
	unsigned int numberOfFeatsInPGM = 0;
	unsigned int numOfHighEP = featsWithHighEP.size();
	std::vector <bool> isIncluded, isIncEQ; // isIncluded[i]: featsWithHighEP[i] is already in the PGM
	isIncluded.resize(numOfHighEP, false);
	std::vector <unsigned int> domain, values;
	SimpleFeature* newFeature;
	std::vector <float> objFuncVals; // history of objective-function values, used by the stop criterion
	unsigned int i, j, k = 0, l, objFuncValsSize = 0,  maxGainID = 0;
	int addRes;
	bool stopCrit, choosingByEQVal, needAnUpdate = false;
	float gain, eqVal, epVal, gainMax, eqValMax, epValMax;
	float newWeight, logZ, curObjFuncValue = 0.0;
	// NOTE(review): with the default maxWholeNumOfFeats == (unsigned int) -1 this resize requests
	// ~4 billion floats; confirm a finite "MaxWholeNumOfFeats" is always configured before learn() runs
	objFuncVals.resize(maxWholeNumOfFeats, 0.0);
	while (numberOfFeatsInPGM < maxWholeNumOfFeats) {
		k++; // number of iterations of this cycle
		// Picking a feature from featsWithHighEP that brings the highest likelihood gain
		gainMax = -1.0;
		// to run conditional inference we first must run the unconditional variant (computes logZ)
		inference->run(logZ, pgmStruct);
		for (i = 0; i < numOfHighEP; i++) {
			if (isIncluded[i])
				continue;
			inference->run(eqVal, featsWithHighEP[i], pgmStruct);
			gain = likelihoodGain (epValuesHighEP[i], eqVal);
			if (gain > gainMax) {
				maxGainID = i;
				gainMax = gain;
				eqValMax = eqVal;
			}
		}
		// Checking the stop conditions: no candidates left, or best gain below the threshold
		if ((gainMax < 0.0) || (gainMax < minLikelihoodGainHighEP))
			break;

		// Adding the selected feature to the PGM
		isIncluded[maxGainID] = true;
		j = featsWithHighEP[maxGainID].size();
		domain.resize(j);
		values.resize(j);
		for (i = 0; i < j; i++) {
			domain[i] = featsWithHighEP[maxGainID][i].first;
			values[i] = featsWithHighEP[maxGainID][i].second;
		}
		newFeature = new SimpleFeature();
		newFeature->setDesiredValues(values);
		// calculating an approximate weight (refined by exact parameter learning below)
		newWeight = weightValue (epValuesHighEP[maxGainID], eqValMax);
		// actually adding a feature
		addRes = pgmStruct.addFeature(domain, newFeature, newWeight);
		if (addRes >= 0) { // if the feature was added - then counting it
			// updating local description of pgm
			if (numberOfFeatsInPGM >= featsInPGM.size())
				featsInPGM.resize(numberOfFeatsInPGM + minVecResize);
			featsInPGM[numberOfFeatsInPGM] = featsWithHighEP[maxGainID];
			// updating variable for number of features in pgm
			numberOfFeatsInPGM++;
		} else // if the feature was not added - choosing a new one over again
			continue;
		// Running exact parameter learning (previously set weight was an approximation)
		curObjFuncValue = parameterLearning->learn(dataSetLearn, pgmStruct, inference);

		// Constructing features with high expected value w.r.t. probability distribution defined by current state of the PGM
		getFeatsHighEQ (featsWithHighEQ, curLastFeat, featsSizes, maxSizeOfArbitraryFeats, featsWithHighEP[maxGainID], pgmStruct);
		// setting vector isIncEQ showing that feature featsWithHighEQ[i] is already in the pgm or occurs earlier in the vector featsWithHighEQ
		isIncEQ.resize(curLastFeat, false);
		for (i = 0; i < curLastFeat; i++) {
			isIncEQ[i] = false;
			// duplicate of a feature already added to the PGM?
			for (j = 0; j < numberOfFeatsInPGM; j++) {
				if (featsWithHighEQ[i] == featsInPGM[j]) {
					isIncEQ[i] = true;
					break;
				}
			}
			if (isIncEQ[i])
				continue;
			// duplicate of an earlier candidate in featsWithHighEQ?
			for (j = 0; j < i; j++) {
				if (featsWithHighEQ[i] == featsWithHighEQ[j]) {
					isIncEQ[i] = true;
					break;
				}
			}
		}

		for (l = 0; l < maxNumOfFeatsHighEQAdded; l++) {
			// Calculating empirical probability of the constructed features and selecting a feature that brings the highest gain in the objective function
			// Picking a feature from featsWithHighEQ that brings the highest likelihood gain
			gainMax = -1.0;
			eqValMax = -1.0;
			choosingByEQVal = false;
			// to run conditional inference we first must run the unconditional variant
			inference->run(logZ, pgmStruct);
			for (i = 0; i < curLastFeat; i++) {
				// if size of feature is less than 2 - we do not consider it || this feature was considered earlier
				if ((featsSizes[i] < 2) || (isIncEQ[i]))
					continue;
				// running conditional inference for a current feature
				inference->run(eqVal, featsWithHighEQ[i], featsSizes[i], pgmStruct);
				epVal = dataSetLearn.calculateEmpiricalProb(featsWithHighEQ[i], featsSizes[i]);
				/*if (epVal < eps) { // gain would be infinity
					// if empirical probability of the feature is 0 - then selecting a feature with the max expected value
					if (eqVal > eqValMax) {
						maxGainID = i;
						gainMax = FLT_MAX;
						eqValMax = eqVal;
						epValMax = epVal;
						choosingByEQVal = true;
					}
				} else {
					if (choosingByEQVal) // gain of infinity - was reached earlier
						continue;
				*/	gain = likelihoodGain (epVal, eqVal);
					if (gain > gainMax) {
						maxGainID = i;
						gainMax = gain;
						eqValMax = eqVal;
						epValMax = epVal;
					}
				//}
			}
			// Checking the stop conditions
			if ((gainMax < 0.0) || (gainMax < minLikelihoodGainHighEQ) || (eqValMax < epValMax))
				// no candidates	too small gain						   the chosen feature is not from the domain where eq is high and ep is low
				break;

			// Adding the selected feature to the PGM
			j = featsSizes[maxGainID];
			domain.resize(j);
			values.resize(j);
			for (i = 0; i < j; i++) {
				domain[i] = featsWithHighEQ[maxGainID][i].first;
				values[i] = featsWithHighEQ[maxGainID][i].second;
			}
			newFeature = new SimpleFeature();
			newFeature->setDesiredValues(values);
			// calculating weight
			if (epValMax < eps) { // eqValMax cannot be 0 - that's why checking only epValMax
				newWeight = 0.0;
				needAnUpdate = true; // need to optimize this weight value
			} else
				newWeight = weightValue (epValMax, eqValMax);
			// actually adding a feature
			addRes = pgmStruct.addFeature(domain, newFeature, newWeight);
			if (addRes >= 0) {
				// updating local description of pgm
				if (numberOfFeatsInPGM >= featsInPGM.size())
					featsInPGM.resize(numberOfFeatsInPGM + minVecResize);
				featsInPGM[numberOfFeatsInPGM] = featsWithHighEQ[maxGainID];
				// updating variable for number of features in pgm
				numberOfFeatsInPGM++;
				// optimizing weights if the last weight '0' was added
				if (needAnUpdate) {
					curObjFuncValue = parameterLearning->learn(dataSetLearn, pgmStruct, inference);
					needAnUpdate = false;
				}
			}
			if ((numberOfFeatsInPGM >= maxWholeNumOfFeats) || (addRes < 0))
				break;
		}
		// Running exact parameter learning (previously set weights were an approximation)
		objFuncVals[objFuncValsSize] = parameterLearning->learn(dataSetLearn, pgmStruct, inference);
		objFuncValsSize++;

		// printout statistics:
		std::cout << "\nMain cycle iteration: " << k << ", last obj-function value: " << objFuncVals[objFuncValsSize-1] << "\t\t";
		
		// checking stop condition: break when the objective value stayed the same (within eps)
		// over the last numIterWithSameValue iterations
		if (objFuncValsSize > numIterWithSameValue) {
			// 'x != x' holds only for NaN - skip the stability test when the last objective value is NaN
			if (objFuncVals[objFuncValsSize - 1] != objFuncVals[objFuncValsSize - 1])
				continue;
			stopCrit = true;
			for (i = objFuncValsSize - numIterWithSameValue; i < (objFuncValsSize - 1); i++) 
				if  (std::abs(objFuncVals[objFuncValsSize - 1] - objFuncVals[i]) > eps) {
					stopCrit = false;
					break;
				}
			if (stopCrit)
				break;
		}
	}

	return 0;
}


/****************************************************************************
*					ImprovedGreedyAlgorithm::setParameters
*
*	Description:	Set object patameters from environment
*	Parameters:		environment - a set of parameters read from config file
*	Returns:		0
*
****************************************************************************/
int ImprovedGreedyAlgorithm::setParameters (Environment &environment) {
	double dblVal;
	int intVal;
	std::string strVal;
	// Numeric tuning parameters: a member keeps its constructor default unless the
	// corresponding key is present in the config file.
	if (environment.getDoubleParamValue(dblVal, "ThresholdForIndividualFeats") == 0)
		thresholdForIndividualFeats = dblVal;
	if (environment.getDoubleParamValue(dblVal, "ThresholdForArbitraryFeats") == 0)
		thresholdForArbitraryFeats = dblVal;
	if (environment.getIntParamValue(intVal, "MinNumOfIndivFeatsPerVariable") == 0)
		minNumOfIndivFeatsPerVariable = intVal;
	if (environment.getIntParamValue(intVal, "MaxSizeOfArbitraryFeats") == 0)
		maxSizeOfArbitraryFeats = intVal;
	if (environment.getIntParamValue(intVal, "MaxNumOfFeatsHighEQAdded") == 0)
		maxNumOfFeatsHighEQAdded = intVal;
	if (environment.getIntParamValue(intVal, "MaxWholeNumOfFeats") == 0)
		maxWholeNumOfFeats = (unsigned int) intVal;
	if (environment.getDoubleParamValue(dblVal, "MinLikelihoodGainHighEP") == 0)
		minLikelihoodGainHighEP = dblVal;
	if (environment.getDoubleParamValue(dblVal, "MinLikelihoodGainHighEQ") == 0)
		minLikelihoodGainHighEQ = dblVal;
	if (environment.getIntParamValue(intVal, "NumIterWithSameValue") == 0)
		numIterWithSameValue = (unsigned int) intVal;
	// Helper objects: the inference engine and the parameter-learning algorithm are
	// created by name through their factories and configured from the same environment.
	if (environment.getStringParamValue(strVal, "AlgorithmForRunningInference") == 0) {
		InferenceFactory inferenceFactory;
		inference = inferenceFactory.createObjectByName (strVal);
		inference->setParameters(environment);
	} else {
		std::cout << "\nError in ImprovedGreedyAlgorithm::setParameters: cannot find specification of inference algorithm in the config file.\n";
	}
	if (environment.getStringParamValue(strVal, "AlgorithmForParameterLearning") == 0) {
		ParameterLearningFactory parameterLearningFactory;
		parameterLearning = parameterLearningFactory.createObjectByName (strVal);
		parameterLearning->setParameters(environment);
	} else {
		std::cout << "\nError in ImprovedGreedyAlgorithm::setParameters: cannot find specification of an algorithm for parameters learning in the config file.\n";
	}

	return 0;
}


/****************************************************************************
*					ImprovedGreedyAlgorithm::getFeatsHighEP
*
*	Description:	Returns a set of features with high empirical probability
*	Parameters:		featsWithHighEP - descriprion of simple features with high 
*						value of empirical probability (EP)
*					dataSet - data set that is used for learning
*	Returns:		0
*
****************************************************************************/
int ImprovedGreedyAlgorithm::getFeatsHighEP (std::vector <std::vector < std::pair<unsigned int, unsigned int> > >& newFeats, DataSet& dataSet) {
	// Builds a set of features whose empirical probability (EP) in the data set is high.
	// Step 1: per variable, keep the values (individual features) whose sample count passes a threshold;
	// Step 2: recursively combine them into larger features (combineFeatures);
	// Step 3: drop combined features consisting of a single individual feature and return the rest.
	std::vector <std::vector < std::pair<unsigned int, unsigned int> > > combined;
	unsigned int numVars = dataSet.maxValues.size();
	std::vector <std::vector <unsigned int> > indivIDs; // per variable: value IDs kept as individual features
	indivIDs.resize(numVars);

	std::vector < unsigned int > counts, countsOrig;
	unsigned int varIdx, valIdx;
	unsigned int indivThreshold;
	for (varIdx = 0; varIdx < dataSet.dataPerFeature.size(); varIdx++) {
		// Empirical counts for each value of this variable (set bits in its sample mask)
		counts.resize(dataSet.dataPerFeature[varIdx].size());
		for (valIdx = 0; valIdx < dataSet.dataPerFeature[varIdx].size(); valIdx++) {
			counts[valIdx] = dataSet.bitSum(dataSet.dataPerFeature[varIdx][valIdx]);
		}
		// Keep the unsorted counts for the selection pass and sort a copy to derive the threshold
		countsOrig = counts;
		std::sort(counts.begin(), counts.end());
		if (minNumOfIndivFeatsPerVariable > counts.size()) {
			// More values are required than this variable has - accept every value
			indivThreshold = counts[0];
		} else {
			// Base threshold comes from thresholdForIndividualFeats and the data-set size ...
			indivThreshold = (unsigned int) (thresholdForIndividualFeats * dataSet.data.size());
			// ... but it is lowered whenever it would keep fewer than
			// minNumOfIndivFeatsPerVariable values of this variable
			if (minNumOfIndivFeatsPerVariable > 0) {
				if (counts[counts.size() - minNumOfIndivFeatsPerVariable] < indivThreshold)
					indivThreshold = counts[counts.size() - minNumOfIndivFeatsPerVariable];
			}
		}
		// Record the IDs of the values whose count reaches the threshold
		indivIDs[varIdx].clear();
		for (valIdx = 0; valIdx < countsOrig.size(); valIdx++) {
			if (countsOrig[valIdx] >= indivThreshold)
				indivIDs[varIdx].push_back(valIdx);
		}
	}

	// Threshold (as a sample count) on the EP of combined features, read inside combineFeatures
	this->threshold = (unsigned int) (thresholdForArbitraryFeats * dataSet.data.size());

	// Combine the individual features into a set of larger features with high EP
	combineFeatures (0, indivIDs, combined, dataSet);

	// Copy to the output only those features built from more than one individual feature
	unsigned int total = combined.size();
	std::vector <bool> keepFlag;
	keepFlag.resize(total, true);
	unsigned int keepCount = total;
	for (valIdx = 0; valIdx < total; valIdx++) {
		if (combined[valIdx].size() <= 1) {
			keepCount--;
			keepFlag[valIdx] = false;
		}
	}
	newFeats.resize(keepCount);
	unsigned int outPos = 0;
	for (valIdx = 0; valIdx < total; valIdx++) {
		if (keepFlag[valIdx]) {
			newFeats[outPos] = combined[valIdx];
			outPos++;
		}
	}

	return 0;
}


/****************************************************************************
*					ImprovedGreedyAlgorithm::combineFeatures
*
*	Description:	Combines individual features in order to get a set of features with high EP
*	Parameters:		step - index of indivFeats that shows which variable is considered now
*					indivFeats - descriprion of individual features that have high empirical probability
*					feats - output of the function - features with high empirical probability
*	Returns:		0
*
****************************************************************************/
int ImprovedGreedyAlgorithm::combineFeatures (const unsigned int& step,
	std::vector <std::vector <unsigned int> >& indivFeats, 
	std::vector <std::vector < std::pair<unsigned int, unsigned int> > >& feats,
	DataSet& dataSet ) const 
{
	// Gone through all individual features
	if (step >= indivFeats.size()) {
		feats.clear();
		return 0;
	}

	// Going to the next step of the recursion
	combineFeatures (step + 1, indivFeats, feats, dataSet);

	// Trying to combine all individual features from indivFeats[step] with all features in feats
	unsigned int i, j, k, sz;
	sz = feats.size(); // we use 'sz' because size of feats can be changed
	for (i = 0; i < sz; i++) {
		// if size of feature feats[i] cannot be enlarged then skip thid feature
		if (feats[i].size() >= maxSizeOfArbitraryFeats)
			continue;
		// calculating data representation for feature 'feats[i]'
		std::vector <unsigned __int32> curRep;
		curRep = dataSet.dataPerFeature[feats[i][0].first][feats[i][0].second];
		for (j = 1; j < feats[i].size(); j++) {
			for (k = 0; k < curRep.size(); k++)
				curRep[k] &= dataSet.dataPerFeature[feats[i][j].first][feats[i][j].second][k];
		}
		// trying to combine with all individual features in 'indivFeats[step]'
		std::vector <unsigned __int32> newRep;
		for (j = 0; j < indivFeats[step].size(); j++) {
			newRep = curRep;
			for (k = 0; k < curRep.size(); k++)
				newRep[k] &= dataSet.dataPerFeature[step][ indivFeats[step][j] ][k];
			// If the EP of the new feature is more than a threshold - then we add this new feature to the list 
			if (DataSet::bitSum(newRep) >= threshold) {
				feats.push_back(feats[i]);
				feats[feats.size() - 1].push_back(std::make_pair(step, indivFeats[step][j]));
			}
		}
	}

	// Adding all individual features from indivFeats[step] to feats
	sz = feats.size();
	feats.resize(sz + indivFeats[step].size());
	for (i = 0; i < indivFeats[step].size(); i++)
		feats[i + sz].resize(1, std::make_pair(step, indivFeats[step][i]));

	return 0;
}


/****************************************************************************
*					ImprovedGreedyAlgorithm::getFeatsHighEQ
*
*	Description:	Returns a set of features that have high expected value w.r.t. PGM
*	Parameters:		newFeats - vector of constructed features with high expected values
*					curLastFeat - last filled here element in vector newFeats
*					newFeatsSizes - sizes of elements in newFeats
*					maxFeatSize - max permited size of a feature
*					lastAddedFeature - descriprion of the feature that was last added to the PGM
*					pgmStruct - description of the PGM
*	Returns:		0
*
****************************************************************************/
int ImprovedGreedyAlgorithm::getFeatsHighEQ (
		std::vector <std::vector < std::pair<unsigned int, unsigned int> > >& newFeats, 
		unsigned int& curLastFeat,
		std::vector <unsigned int>& newFeatsSizes,
		const unsigned int& maxFeatSize,
		std::vector < std::pair<unsigned int, unsigned int> >& lastAddedFeature, 
		PGMStruct& pgmStruct) const 
{
	// Builds candidate features with (potentially) high expected value w.r.t. the current PGM.
	// Every candidate starts from one individual feature of 'lastAddedFeature' and is grown with
	// individual features [var, val] that already occur in the PGM with a positive weight.
	// Parameters:	newFeats - output: constructed features (reused buffer, grown on demand)
	//				curLastFeat - output: number of valid entries in newFeats/newFeatsSizes
	//				newFeatsSizes - output: logical size of each entry (entries are padded up to maxFeatSize)
	//				maxFeatSize - maximal permitted size of a feature
	//				lastAddedFeature - description of the feature most recently added to the PGM
	//				pgmStruct - description of the PGM
	// Returns:		0
	unsigned int i, j, var, val, whereToStartFrom, byWhichToEnd;
	// Mark which individual features [variable][value] are used in the PGM with a positive weight
	std::vector < std::vector<bool> > indFeatUsed;
	indFeatUsed.resize(pgmStruct.nVars);
	for (i = 0; i < pgmStruct.nVars; i++)
		indFeatUsed[i].resize(pgmStruct.numOfValues[i], false);
	std::vector <unsigned int> prefVals;
	for (i = 0; i < pgmStruct.features.size(); i++) {
		for (j = 0; j < pgmStruct.features[i].featsIDs.size(); j++) {
			// we only consider features that have positive weight
			if (pgmStruct.weights[pgmStruct.features[i].weightsIDs[j]] > 0) {
				pgmStruct.featureTypes[pgmStruct.features[i].featsIDs[j]]->getDesiredValues(prefVals);
				if (prefVals.size() != pgmStruct.features[i].varsIDs.size()) 
					std::cout << "\nError in ImprovedGreedyAlgorithm::getFeatsHighEQ: feature does not correspond to the domain size.\n";
				else
					for (var = 0; var < prefVals.size(); var++)
						indFeatUsed[ pgmStruct.features[i].varsIDs[var] ][prefVals[var]] = true;
			}
		}
	}
	// constructing features with high expected value
	curLastFeat = 0;
	for (i = 0; i < lastAddedFeature.size(); i++) {
		// Seed a new size-one feature from lastAddedFeature[i]
		if (curLastFeat >= newFeats.size()) {
			newFeats.resize(curLastFeat + minVecResize);
			newFeatsSizes.resize(curLastFeat + minVecResize);
		}
		if (newFeats[curLastFeat].size() < maxFeatSize)
			newFeats[curLastFeat].resize(maxFeatSize);
		newFeats[curLastFeat][0] = lastAddedFeature[i];
		newFeatsSizes[curLastFeat] = 1;
		whereToStartFrom = curLastFeat;
		curLastFeat++;
		byWhichToEnd = curLastFeat;
		// Collect different variants of features constructed from individual features used in the PGM
		for (var = 0; var < indFeatUsed.size(); var++) {
			// all features are constructed starting from individual feature 'lastAddedFeature[i]' - so, we skip its domain
			if (var == lastAddedFeature[i].first)
				continue;
			for (val = 0; val < indFeatUsed[var].size(); val++) {
				// if a particular individual feature is not in the PGM - do not use it during construction
				if (!indFeatUsed[var][val])
					continue;
				// Going through features in vector newFeats and trying to combine them with individual feature [var, val]
				for (j = whereToStartFrom; j < byWhichToEnd; j++) {
					// if feature j is already at the maximal size - there is no space to make it bigger - skip it
					if (newFeatsSizes[j] >= maxFeatSize) 
						continue;
					// combining feature with [var, val] and adding to the newFeats vector
					if (curLastFeat >= newFeats.size()) {
						newFeats.resize(curLastFeat + minVecResize);
						newFeatsSizes.resize(curLastFeat + minVecResize);
					}
					newFeats[curLastFeat] = newFeats[j];
					// FIX: dropped the explicit template arguments that were previously given to
					// std::make_pair - with them the call does not compile under C++11 and later,
					// because the deduced parameters become rvalue references that cannot bind
					// to the lvalues 'var' and 'val'; plain deduction yields the same pair
					newFeats[curLastFeat][newFeatsSizes[j]] = std::make_pair(var, val);
					newFeatsSizes[curLastFeat] = newFeatsSizes[j] + 1;
					curLastFeat++;
				}
			}
			// updating the current end index of the vector - defines which features can still be extended
			byWhichToEnd = curLastFeat;
		}
	}
	return 0;
}
