<?php

include_once('./FileParser.php.inc');
include_once('./IRutils.php.inc');
include_once('./Document.php.inc');
include_once('./Evaluator.php.inc');
include_once('./wordfilter.php.inc');


// Bootstrap: build the shared file parser and load the stopword list into
// the word filter before any experiment runs. $fileparser and $useSVD are
// globals read by the functions below via the "global" keyword.
$fileparser=new FileParser();
Filter::$stoplist=$fileparser->parseStopwords(Filter::$stoplist); //Load stoplist to filter
$useSVD=false; // toggled on by runLSI(); consulted inside runAll()
//var_dump(Filter::$stoplist);

// Entry point: only the LSI experiment is active; the earlier lab runs
// are kept for reference.
//lab123($fileparser);
//runSVM($fileparser);
runLSI($fileparser);


function runLSI($fileparser){
	// Runs the full index/search/evaluate pipeline once with stoplist +
	// stemming filtering and LSI (SVD) dimensionality reduction enabled.
	// A stoplist setting implies basic word counting; pagerank stays off.
	global $useSVD;
	$useSVD = true;
	$usePageRank = false;
	echo("<---------Using stopwords LSI--------->\r\n");
	Filter::$filterSettings = array(
		FilterSetting::STOPLIST => 1,
		FilterSetting::STEMMING => 1,
	);
	runAll($fileparser, $usePageRank);
}

/**
 * Applies Latent Semantic Indexing: writes the collection's term-document
 * weight matrix to disk, runs a rank-10 SVD over it (via IRutils, which
 * reads/writes intermediate files), then replaces the weights of both the
 * document collection and the query collection with their coordinates in
 * the reduced concept space.
 *
 * NOTE(review): the $filepath parameter is never used in this body —
 * presumably it was meant to name the SVD input file; confirm against
 * saveWeightMatrixCompressed() before removing it.
 */
function useLSI($collection, $filepath, $queryCollection){
	global $fileparser;
	$collection->mapDocsTerms2numbers();
	$collection->saveWeightMatrixCompressed();
	IRutils::runSVD(10); // k = 10: rank of the truncated SVD

	// V^T holds the document coordinates in concept space.
	$Vt=IRutils::readV();
	echo "Nbr of terms: ".count($collection->mapTerms)."\n";
	echo "Vt rows: " . count($Vt);
	echo " Vt cols: " . count($Vt[0])."\n";
	//var_dump($Vt);
	
	$sigma=IRutils::readS();
	$Ut=IRutils::readUt();
	// Invert the diagonal singular-value matrix; Sigma^-1 * U^T is the
	// projection used below to map query vectors into concept space.
	$sigma=IRutils::invDiagonalMatrix($sigma);
	//echo "Nbr of terms: ".count($collection->mapTerms)."\n";
	echo "Ut rows: " . count($Ut);
	echo " Ut cols: " . count($Ut[0])."\n";
	
	echo "Sigma rows: " . count($sigma);
	echo " Sigma cols: " . count($sigma[0])."\n";
	$result = IRutils::matrixmult($sigma, $Ut);
	echo "Result rows: " . count($result);
	echo " Result cols: " . count($result[0])."\n";
	$fileparser->saveConcepts($result, $collection);
	$fileparser->saveConcepts2($result, $collection);
	
	// Documents now live in concept space (columns of V^T).
	$collection->owerwriteWeights($Vt);
	// Build the raw term-by-query matrix: rows follow the collection's
	// term order, one column per query; missing terms weigh 0.
	$queryMatrix = array();
	$i = 0;
	foreach($collection->mapTerms as $term => $index){
		$j = 0;
		$queryMatrix[$i] = array(); 
		foreach($queryCollection->docs as $doc){
			if(isset($doc->weights[$term])){
				$queryMatrix[$i][$j] = $doc->weights[$term];
			}
			else{
				$queryMatrix[$i][$j] = 0;
			}
			$j++;
		}
		$i++;
	}
	
	// Project queries: q_k = Sigma^-1 * U^T * q.
	$queryMatrix = IRutils::matrixmult($result, $queryMatrix);
	echo "q_k rows: " . count($queryMatrix);
	echo " q_k cols: " . count($queryMatrix[0])."\n";
	$queryCollection -> mapDocsTerms2numbers();
	$queryCollection -> owerwriteWeights($queryMatrix);
}



function runSVM($fileparser){
	// Builds a labelled training collection and a test collection, weights
	// both with TfIdf (the test side reuses the training statistics so the
	// feature spaces match), scales them by a common maximum, and dumps
	// both in libSVM input format.
	Filter::$filterSettings = array();
	Filter::$filterSettings[FilterSetting::STOPLIST] = 1;
	Filter::$filterSettings[FilterSetting::STEMMING] = 1;

	// Training data: positive examples labelled 1, negatives labelled -1.
	$positives = new Collection();
	$fileparser->parseCollection($positives, './data/CP.xml', 1);
	$negatives = new Collection();
	$fileparser->parseCollection($negatives, './data/nonCP.xml', -1);
	$training = $positives->mergeCollectionDocs($negatives);

	// Index the merged collection: term frequency, idf, combined TfIdf.
	$training->calcTermDocFreq();
	$training->calcIdfFactor();
	$training->calcTfIdf();

	$testData = new Collection();
	$fileparser->parseCollection($testData, './data/testSVM.xml', 0);

	// Collect the training vocabulary ...
	$vocabulary = array();
	foreach ($training->docs as $trainDoc) {
		foreach ($trainDoc->termFreq as $word => $frequency) {
			if (!isset($vocabulary[$word])) {
				$vocabulary[$word] = 1;
			}
		}
	}

	// ... and drop every test term the training data never saw.
	foreach ($testData->docs as $testDoc) {
		$testDoc->filterTermFrequency($vocabulary);
	}

	// Weight the test documents with the TRAINING document frequencies and
	// idf factors so both collections share one feature space.
	$testData->termDocFreq = $training->termDocFreq;
	$testData->idfFactors = $training->idfFactors;
	$testData->calcTfIdf();

	// Gold-standard classifications for the test documents, in file order.
	$goldLabels = array(1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1,
			-1, -1, -1, -1, -1, -1, -1, 1, 1);
	$testData->changeLabels($goldLabels);

	// Scale both collections by the single largest weight seen anywhere.
	$scale = max($training->getMaxWeight(), $testData->getMaxWeight());
	$training->normalizeWeights($scale);
	$testData->normalizeWeights($scale);

	$training->mapDocsTerms2numbers();
	$testData->mapDocsTerms2numbers();

	$training->printToLibSVMFormat('./data/libSVMInput.txt');
	$testData->printToLibSVMFormat('./data/libSVMtestdata.txt');
}


function lab123($fileparser){
	// Labs 1-3: run the whole index/search/evaluate pipeline once per
	// filter configuration, from plain word counting up to stemming.
	// A stoplist setting implies basic word counting.
	$usePageRank = false;

	// Each experiment: banner text plus the filter settings to run with.
	$experiments = array(
		array("<---------Not using stopwords--------->\r\n",
				array(FilterSetting::BASIC => 1)),
		array("<---------Using stopwords--------->\r\n",
				array(FilterSetting::STOPLIST => 1)),
		array("<---------Using bigrams and stoplist--------->\r\n",
				array(FilterSetting::STOPLIST => 1, FilterSetting::BIWORD => 1)),
		array("<---------Using trigrams and stoplist--------->\r\n",
				array(FilterSetting::STOPLIST => 1, FilterSetting::TRIWORD => 1)),
		array("<---------Using stemming and stoplist--------->\r\n",
				array(FilterSetting::STOPLIST => 1, FilterSetting::STEMMING => 1)),
	);

	foreach($experiments as $experiment){
		list($banner, $settings) = $experiment;
		echo($banner);
		Filter::$filterSettings = $settings;
		runAll($fileparser, $usePageRank);
	}
}



function runAll($fileparser, $usePageRank){
	// One full experiment under the currently active filter settings:
	// index the corpus, prepare the queries, optionally project both into
	// LSI concept space, then search, evaluate and print the results.
	global $useSVD;

	$collection = prepareCollection($fileparser);
	$queries = $fileparser->parseQuieries();
	$queryCollection = prepareQuery($queries, $collection);

	if($useSVD){
		echo("SVD used\r\n");
		// Replaces the TfIdf weights of both collections with their
		// coordinates in the reduced SVD concept space.
		useLSI($collection, "svdInputColl.txt", $queryCollection);
	}

	$evaluator = searchAndEvaluate($queryCollection, $queries, $collection, $usePageRank);
	printResults($evaluator);
}

function prepareCollection($fileparser){
	// Parses the document corpus and indexes it: term frequency, inverse
	// document frequency and the combined TfIdf weights.
	// (Alternative smaller corpus: './data/Collection.xml'.)
	$corpus = new Collection();
	$fileparser->parseCollection($corpus, './data/cran600_Collection.xml', -1);
	$corpus->calcTermDocFreq();
	$corpus->calcIdfFactor();
	$corpus->calcTfIdf();
	return $corpus;
}

function prepareQuery($queries, $coll){
	// $queries: associative array, query string => array of relevant doc ids.
	// Builds a Collection containing one "document" per query and weights
	// it using the statistics of the real document collection, so query
	// and document vectors are comparable.
	$queryCollection = new Collection();
	foreach(array_keys($queries) as $queryString){
		$queryCollection->addDoc($queryString, "query", $queryString, -2);
	}
	// Reuse the corpus document frequencies and idf factors; calcTfIdf()
	// then also sets the weights for every query document.
	$queryCollection->termDocFreq = $coll->termDocFreq;
	$queryCollection->idfFactors = $coll->idfFactors;
	$queryCollection->calcTfIdf();
	return $queryCollection;
}

/** < Lab1 >
 * Searches the prepared collection with the query collection and evaluates
 * the results for each retrieval model.
 *
 * Boolean model: a document is relevant only if it contains all query words.
 * VSM boolean: any document sharing at least one query word is (equally)
 * relevant. VSM: documents get a real-valued relevance score.
 *
 * @param $queryCollection Collection of query "documents".
 * @param $queries         Associative array: query string => relevant doc ids.
 * @param $coll            The indexed document collection.
 * @param $usePageRank     When true, also evaluate VSM combined with
 *                         precalculated pageranks.
 * @return Evaluator holding the saved results for every model run.
 */
function searchAndEvaluate($queryCollection, $queries, $coll, $usePageRank){
	// Bug fix: $fileparser is used in the pagerank branch below but was
	// never declared global, so parsePageRank() was invoked on an
	// undefined variable whenever $usePageRank was true.
	global $fileparser;

	//<-------  SEARCH---------->
	//RESULTS: Associative Arrays :: key = String query, val = (Associative Array: key = docID, val = search result value)
	$searchResults=$coll->calculateLab1Models($queryCollection);

	//<--- Evaluate the search results--->
	$threshold = 0.2;
	$evaluator = new Evaluator($queries, $threshold);
	$evaluator->saveResults(Models::BOOL,  $searchResults[Models::BOOL]);
	$evaluator->saveResults(Models::VSM_BOOL,  $searchResults[Models::VSM_BOOL]);
	$evaluator->saveResults(Models::VSM,  $searchResults[Models::VSM]);

	if($usePageRank){
		//<------- ADDING PAGERANK (precalculated in java)--------->
		//PAGERANKS: Associative Array :: key = String docID, val = Float pagerank
		$pageranks=$fileparser->parsePageRank();
		//<---- Evaluate pagerank ---->
		$searchResults[Models::VSM_PAGERANK]=$coll->calculateVSM_PAGERANK($pageranks, $searchResults[Models::VSM]);
		// NOTE(review): lower threshold presumably because pagerank-combined
		// scores are much smaller than plain VSM scores — confirm.
		$evaluator->setThreshold(0.001);
		$evaluator->saveResults(Models::VSM_PAGERANK,  $searchResults[Models::VSM_PAGERANK]);
	}
	return $evaluator;
}

function printResults($evaluator){
	// Prints the evaluation of the vector space model: the top-ten hits
	// per query followed by the averaged figures. Other models (BOOL,
	// VSM_BOOL, VSM_PAGERANK) can be printed the same way if needed.
	$results = $evaluator->getSearchResults(Filters::TOP);
	$results->calculateResults();
	$results->printTopTen(Models::VSM);
	$results->printAvg(Models::VSM);
}


?>
