package com.ubb.smartsearch.classifier.naivebayes;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;

import javax.ejb.Stateless;

import com.ubb.smartsearch.classifier.ClassifierInterface;
import com.ubb.smartsearch.classifier.tool.Utils;

/**
 * Naive Bayes text classifier: {@link #trainCategories(String)} learns
 * per-category word probabilities from a training corpus, after which
 * {@link #classifyDocument(String)} assigns the most probable category to the
 * document behind a URL.
 *
 * @author Aron
 */
@Stateless
public class NaiveBayesClassifierBean implements ClassifierInterface {

	private static final long serialVersionUID = 1L;

	/**
	 * word -> (category index -> value). During training the value is the raw
	 * occurrence count of the word in that category; after {@link #bayes()}
	 * runs it is the estimated probability P(word | category).
	 */
	private Map<String, Map<Integer, Double>> wordDatabase;

	/**
	 * category index -> value. During training the value is the number of
	 * training documents in the category; after {@link #bayes()} runs it is
	 * the prior P(category).
	 */
	private Map<Integer, Double> categories;

	/** category name -> category index assigned during training. */
	private Map<String, Integer> categoriesIndex;

	/** Total number of training documents across all categories. */
	private int N;

	/**
	 * Classifies the document behind the given URL and returns the name of
	 * the best-scoring category, comparing log-space naive Bayes scores.
	 *
	 * @param url location of the document to classify
	 * @return the most probable category name, or {@code null} when no text
	 *         could be extracted from the URL
	 * @throws IllegalStateException if the classifier has not been trained
	 *         (previously this surfaced as a bare NullPointerException)
	 */
	public String classifyDocument(String url) {

		System.out.println("Naive Bayes Classifier");
		if (wordDatabase == null || categories == null || categoriesIndex == null) {
			throw new IllegalStateException(
					"trainCategories() must be called before classifyDocument()");
		}
		String text = Utils.extractTextFromURL(url);
		if (text == null) {
			return null;
		}

		// Only the *presence* of a word matters below — each distinct word
		// contributes once per category — so a set suffices. The original
		// code built a word->count map whose counts were never read.
		Set<String> wordsInDoc = new HashSet<String>(Utils.tokenize(text));

		double min = Double.MAX_VALUE;
		double max = -Double.MAX_VALUE;
		String minCategory = null;
		String maxCategory = null;
		double log2 = Math.log(2);
		for (Entry<String, Integer> category : categoriesIndex.entrySet()) {
			// Sum of log2-probabilities for this category.
			double score = 0.0;
			for (String word : wordsInDoc) {
				Map<Integer, Double> wordDB = wordDatabase.get(word);
				if (wordDB == null) {
					// Word never seen during training; ignore it.
					continue;
				}

				Double probability = wordDB.get(category.getValue());
				if (probability == null) {
					// NOTE(review): a word absent from this particular
					// category is skipped, which biases the comparison
					// between categories; Laplace (add-one) smoothing would
					// be the standard fix. Left as-is to preserve behavior.
					continue;
				}
				score += Math.log(probability) / log2;
			}
			// Add the category's log prior.
			score += Math.log(this.categories.get(category.getValue())) / log2;
			if (score > max) {
				max = score;
				maxCategory = category.getKey();
			}
			if (score < min) {
				min = score;
				minCategory = category.getKey();
			}
		}
		System.out.println(maxCategory + " -- max -- " + max);
		System.out.println(minCategory + " -- min -- " + min);
		return maxCategory;
	}

	/**
	 * Trains the classifier from a corpus laid out as one sub-folder per
	 * category, each containing the category's training documents. Counts
	 * word occurrences per category, then converts the counts into
	 * probabilities via {@link #bayes()}.
	 *
	 * @param trainCorpus name of the training corpus
	 * @throws IOException if the corpus train folder cannot be listed
	 */
	public void trainCategories(String trainCorpus)
			throws IOException {

		File trainFolder = Utils.getTrainFolderOfCorpus(trainCorpus);
		// listFiles() returns null for a missing/non-directory path; the
		// original code dereferenced it unchecked and also called
		// listFiles() a second time inside the loop header.
		File[] categoryFolders = trainFolder.listFiles();
		if (categoryFolders == null) {
			throw new IOException(
					"Cannot list train folder: " + trainFolder.getPath());
		}
		wordDatabase = new HashMap<String, Map<Integer, Double>>(15000);
		categories = new HashMap<Integer, Double>(categoryFolders.length);
		categoriesIndex = new HashMap<String, Integer>(categoryFolders.length);
		N = 0;
		int categoryIndex = 0;
		for (File categoryFolder : categoryFolders) {
			File[] trainFiles = categoryFolder.listFiles();
			if (trainFiles == null) {
				// Stray non-directory entry among the category folders;
				// skip it instead of crashing with an NPE.
				continue;
			}
			categoriesIndex.put(categoryFolder.getName(), categoryIndex);
			int trainFilesNr = trainFiles.length;
			N += trainFilesNr;
			categories.put(categoryIndex, (double) trainFilesNr);
			for (File trainFile : trainFiles) {
				List<String> tokens = Utils.tokenize(Utils.readFile(trainFile.getPath()));
				for (String token : tokens) {
					Map<Integer, Double> wordFrequency = wordDatabase.get(token);
					if (wordFrequency == null) {
						wordFrequency = new HashMap<Integer, Double>(categoryFolders.length);
						wordDatabase.put(token, wordFrequency);
					}
					// Increment this word's occurrence count for the category.
					Double nr = wordFrequency.get(categoryIndex);
					wordFrequency.put(categoryIndex, nr == null ? 1.0 : nr + 1.0);
				}
			}
			++categoryIndex;
		}
		bayes();
	}

	/**
	 * Converts the raw counts accumulated by
	 * {@link #trainCategories(String)} into probabilities: category priors
	 * |docs in category| / N, and per-word conditional probabilities
	 * count(word, category) / total word count of the category.
	 */
	private void bayes() {

		// Compute the prior of each category: |documents in category| / N.
		Map<Integer, Double> categoriesTemp = new HashMap<Integer, Double>(categories.size());
		for (Entry<Integer, Double> posJ : categories.entrySet()) {
			categoriesTemp.put(posJ.getKey(), posJ.getValue() / N);
		}
		categories = categoriesTemp;

		// Total number of word occurrences per category, accumulated from
		// every word's per-category count map.
		Map<Integer, Double> sum = new HashMap<Integer, Double>(categories.size());
		for (Integer actual : categories.keySet()) {
			sum.put(actual, 0.0);
		}
		for (Entry<String, Map<Integer, Double>> wk : wordDatabase.entrySet()) {
			for (Entry<Integer, Double> nlj : wk.getValue().entrySet()) {
				sum.put(nlj.getKey(), sum.get(nlj.getKey()) + nlj.getValue());
			}
		}

		// Conditional probability of every word in every category:
		// count(word, category) / total word count of the category.
		Map<String, Map<Integer, Double>> wordDatabaseTemp = new HashMap<String, Map<Integer, Double>>(15000);
		for (Entry<String, Map<Integer, Double>> wk : wordDatabase.entrySet()) {
			Map<Integer, Double> wkHash = new HashMap<Integer, Double>();
			for (Entry<Integer, Double> nlj : wk.getValue().entrySet()) {
				wkHash.put(nlj.getKey(), nlj.getValue() / sum.get(nlj.getKey()));
			}
			wordDatabaseTemp.put(wk.getKey(), wkHash);
		}
		wordDatabase = wordDatabaseTemp;
	}

	/** No-op: Rocchio parameters do not apply to the naive Bayes classifier. */
	public void setRocchioParameters(double alfa, double beta) {}

}
