package anatex.kea.kea;


import java.util.*;

import anatex.kea.*;
import anatex.kea.stemmers.PhraseStemmer;
import anatex.domain.CustomLocale;
import anatex.domain.Document;
import anatex.domain.TrainingDocument;
import anatex.domain.KeywordExtractionModel;


public class Kea extends AbstractAlgo {

	// Locale derived from the bound document (defaults to en_US).
	// NOTE(review): assigned but never read anywhere in this class — confirm
	// whether it is still needed before removing.
	private Locale locale = new Locale("en", "US");

	// Document currently bound to this extractor; may be null until
	// setDocument(...)/extract(...) is called.
	private Document document;

	// Training corpus; null until setTrainingDocuments(...)/train(...) is called.
	private List<TrainingDocument> trainingDocuments;

	// Lazily initialized helpers (see loadHelpers()).
	private PhraseStemmer stemmer = null;

	private AttributesCalculator calculator = null;

	// Stemmed phrases keyed by document id, populated by train().
	// NOTE(review): static state is shared across all Kea instances and is not
	// thread-safe; kept static because getStemPhrases() exposes it statically.
	private static HashMap<Long, ArrayList<ArrayList<String>>> stemPhrases = null;

	// Candidate phrases (with computed attributes) keyed by document id,
	// populated by train().
	private static HashMap<Long, ArrayList<CandidatePhrase>> candidatePhrases = null;

	/**
	 * Creates an extractor bound to the given document, deriving the locale
	 * from the document's {@code CustomLocale}.
	 *
	 * @param doc document to extract keywords from; must expose a CustomLocale
	 */
	public Kea(Document doc) {
		document = doc;
		trainingDocuments = null;

		CustomLocale l = doc.getCustomLocale();
		locale = new Locale(l.getShort_language(), l.getShort_country());
	}

	/**
	 * Creates an unbound extractor; callers must invoke {@link #setDocument}
	 * and {@link #setTrainingDocuments} before use.
	 */
	public Kea() {
		document = null;
		trainingDocuments = null;
	}

	/**
	 * Lazily initializes the phrase stemmer and attribute calculator.
	 * The calculator is seeded with the current training set, which may still
	 * be null if {@link #setTrainingDocuments} has not been called yet.
	 */
	public void loadHelpers() {
		if (null == stemmer) {
			stemmer = new PhraseStemmer();
			calculator = new AttributesCalculator();
			calculator.setTrainingSet(trainingDocuments);
		}
	}

	/**
	 * Extracts keywords for the given document using the supplied model.
	 *
	 * <p>NOTE(review): currently only records the document — the actual
	 * extraction step (previously delegated to a {@code Genitor} that applied
	 * the model and saved the keywords) is not implemented.
	 *
	 * @param document document to extract keywords from
	 * @param model    trained model to apply
	 * @throws KeywordExtractorAlgoException never thrown by the current stub
	 */
	@Override
	public void extract(Document document, KeywordExtractionModel model) throws KeywordExtractorAlgoException {
		this.document = document;
	}

	/** Binds this extractor to the given document. */
	@Override
	public void setDocument (Document doc) {
		document = doc;
	}

	/** Replaces the active training corpus. */
	@Override
	public void setTrainingDocuments (List<TrainingDocument> trainingDocuments) {
		this.trainingDocuments = trainingDocuments;
	}

	/**
	 * Trains on the given corpus in two passes: first stems every document's
	 * content, then computes candidate-phrase attributes per document. Results
	 * are cached in the static per-document-id maps.
	 *
	 * @param td training corpus; becomes the active training set
	 * @throws KeywordExtractorAlgoException propagated from extraction helpers
	 */
	@Override
	public void train (List<TrainingDocument> td) throws KeywordExtractorAlgoException {
		setTrainingDocuments(td);
		loadHelpers();
		stemPhrases 		= new HashMap<Long, ArrayList<ArrayList<String>>>();
		candidatePhrases 	= new HashMap<Long, ArrayList<CandidatePhrase>>();

		// Pass 1: extract stemmed phrases from every document in the training
		// set, using a stemmer configured with that document's locale.
		// (Local is named `doc` to avoid shadowing the `document` field.)
		for (TrainingDocument trainingDocument : trainingDocuments) {
			Document doc = trainingDocument.getDocument();

			stemmer = new PhraseStemmer();
			stemmer.setCustomLocale(doc.getCustomLocale());
			stemPhrases.put(doc.getId(), stemmer.getStemPhrases(doc.getContent()));
		}

		// Pass 2: compute candidate-phrase attributes for each document from
		// the stem phrases gathered in pass 1.
		for (TrainingDocument trainingDocument : trainingDocuments) {
			Document doc = trainingDocument.getDocument();

			calculator = new AttributesCalculator();
			calculator.setTrainingSet(trainingDocuments);
			calculator.setDocument(doc);
			calculator.setStemPhrases(stemPhrases.get(doc.getId()));

			candidatePhrases.put(doc.getId(), calculator.calculateCandidatePhraseAttributes());
		}
	}

	/** Returns the stem phrases computed by the last {@link #train} call (null before training). */
	public static HashMap<Long, ArrayList<ArrayList<String>>> getStemPhrases() {
		return stemPhrases;
	}

}
