package org.mentalsmash.crossroads.web;

import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Scanner;

import org.apache.log4j.Logger;
import org.mentalsmash.crossroads.Configuration;
import org.mentalsmash.crossroads.DictionarySource;


/**
 * A {@link DictionarySource} that builds candidate-word dictionaries by running a
 * web search for a crossword definition, extracting words of the required length
 * from each result page, and ranking them with a B0-like top-k merge where earlier
 * search results are weighted exponentially higher.
 */
public class WebDictionarySource implements DictionarySource {

	private static final Logger log = Logger.getLogger(WebDictionarySource.class);
	private final WebSearcher _searcher;
	private int _numResults = Configuration.getInstance().getDictionarySize();

	/** Creates a source using the default searcher and the configured dictionary size. */
	public WebDictionarySource(){
		_searcher = new WebSearcher();
	}

	/**
	 * Creates a source with an explicit result cap and search language.
	 *
	 * @param dictionarySize maximum number of words returned per definition
	 * @param lang_code language code passed to the underlying web searcher
	 */
	public WebDictionarySource(int dictionarySize, String lang_code){
		_searcher = new WebSearcher(lang_code, false);
		_numResults = dictionarySize;
	}

	/**
	 * Searches the web for the given definition and returns up to
	 * {@code _numResults} candidate words of the given length, ranked by a
	 * B0-like top-k algorithm over the per-page dictionaries.
	 *
	 * @param definition the crossword clue to search for; empty/null yields an empty list
	 * @param length required word length, forwarded to the HTML filter
	 * @return a ranked list of lowercase candidate words (possibly empty, never null)
	 */
	@Override
	public List<String> getDictionaryForDefinition(String definition, int length) {

		log.info("Searching the web for definition \"" + definition + "\"..." );
		if(definition == null || definition.equals("")){
			log.warn("Empty definition. Not performing web search");
			return new ArrayList<String>();
		}

		WebSearchResult[] resultSet = _searcher.doSearch(definition);
		if(resultSet.length == 0)
			log.warn("Result set is empty!!");

		HtmlFilter filter = new HtmlFilter(length);

		// One dictionary per web source, in search-result order: the prerequisite
		// structure for the B0-like top-k merge below.
		ArrayList<ArrayList<String>> sources = buildDictionaries4Sources(
				resultSet, filter);

		List<String> dictionary = topKwords(sources);

		log.info("Done.");
		return dictionary;
	}

	/**
	 * Builds one dictionary (list of lowercase words) per search result, preserving
	 * the search-result order. Pages that point to binary documents or that fail to
	 * filter contribute an empty dictionary, so positions stay aligned with the
	 * original result ranking.
	 *
	 * @param resultSet web search results, in relevance order
	 * @param filter extracts words of the target length from an HTML page
	 * @return a list with exactly one (possibly empty) word list per result
	 */
	private ArrayList<ArrayList<String>> buildDictionaries4Sources(
			WebSearchResult[] resultSet, HtmlFilter filter) {
		ArrayList<ArrayList<String>> sources = new ArrayList<ArrayList<String>>(resultSet.length);

		for (int webResultIndex = 0; webResultIndex < resultSet.length; webResultIndex++) {

			String[] resultsFromPage = null;
			URL currentUrl = resultSet[webResultIndex].getPageURL();
			try {
				if(isBinaryDocUrl(currentUrl)){
					log.debug(currentUrl + " was not pointing to an html document. Ignored.");
				} else {
					log.debug("Filtering words from " + currentUrl + " ...");
					resultsFromPage = filter.filter(currentUrl);
					log.debug("Done. Filtered " + resultsFromPage.length + " words");
				}
			}catch(FilterException e){
				// Best effort: an unreachable/broken page just yields an empty dictionary.
				log.debug("Error while retrieving page from " + currentUrl + ". " + e.getMessage());
			}

			ArrayList<String> dictFromCurrentPage = new ArrayList<String>();
			if(resultsFromPage != null) {
				for(String resFromPage : resultsFromPage)
					dictFromCurrentPage.add(resFromPage.toLowerCase());
			}

			sources.add(dictFromCurrentPage);
		}
		return sources;
	}

	/**
	 * Checks whether the URL's file extension is in the configured ignore list
	 * (i.e. points to a non-HTML/binary document).
	 *
	 * @param url the URL to inspect
	 * @return true when the URL ends in a 3-4 character extension that is configured
	 *         to be ignored, false otherwise (including when there is no extension)
	 */
	protected boolean isBinaryDocUrl(URL url){
		// Scanner wraps an in-memory string; close it anyway to silence resource warnings.
		Scanner extScn = new Scanner(url.getFile().toLowerCase());
		try {
			String extension = extScn.findInLine("\\..{3,4}$");
			if (extension == null)
				return false;
			for (String invalid : Configuration.getInstance().getIgnoreExtensions()){
				if(extension.equalsIgnoreCase(invalid))
					return true;
			}
			return false;
		} finally {
			extScn.close();
		}
	}

	/**
	 * B0-like implementation of a top-k algorithm. Ranks words from all pages,
	 * basing each page's weight on its position in the web search ranking: source
	 * scores decay exponentially from the first (most relevant) result to the last,
	 * and a word's total score is the sum of the scores of the sources containing it.
	 *
	 * @param dictionariesFromSources a list of dictionaries representing the sources
	 *        to extract the top k results from, in search-result order
	 * @return a list containing at most k (i.e. {@code _numResults}) words,
	 *         best-scored first
	 */
	private List<String> topKwords(ArrayList<ArrayList<String>> dictionariesFromSources){
		int numSources = dictionariesFromSources.size();

		// Factor used to normalize the position of the source for the exponential decay.
		// NOTE: must be a double literal — the original "1/10" was integer division,
		// which made the factor 0 and gave every source the same weight.
		final double scoreNormalizingFactor = 1.0 / 10;

		// All words found so far, mapped to their accumulated score.
		HashMap<String, Double> scoresMap = new HashMap<String, Double>();
		for(int currentSource = 0; currentSource < numSources; currentSource++){
			ArrayList<String> currentDictionary = dictionariesFromSources.get(currentSource);
			// exp((numSources - position - 1) * factor): highest for the first source,
			// decaying toward 1.0 for the last one.
			double currentScore = 1/Math.exp(-(numSources - (currentSource+1))*scoreNormalizingFactor);
			for (String word : currentDictionary){
				double newScore = currentScore;
				if(scoresMap.containsKey(word)){ // word already seen: accumulate its score
					newScore += scoresMap.get(word);
				}
				scoresMap.put(word, newScore);
			}
		}

		// Sort every known word by descending score and keep the best k.
		ArrayList<String> everyWord = new ArrayList<String>(scoresMap.keySet());
		final Map<String, Double> fscoresMap = Collections.unmodifiableMap(scoresMap);

		Collections.sort(everyWord, new Comparator<String>() {
			@Override
			public int compare(String arg0, String arg1) {
				// Every element of the sorted list is by construction a key of the map.
				return fscoresMap.get(arg1).compareTo(fscoresMap.get(arg0));
			}
		});

		int lastIndex = Math.min(everyWord.size(), _numResults);
		// Copy instead of subList: avoids returning a view that pins the full backing list.
		return new ArrayList<String>(everyWord.subList(0, lastIndex));
	}
}
