package org.NooLab.openNLP.components;

import java.util.ArrayList;


import org.NooLab.nlp.structures.NLPresultSet;
import org.NooLab.openNLP.utilities.LanguageProcessorTools;
import org.NooLab.structures.nlp.NlpPosition;
import org.NooLab.structures.nlp.NlpWord;
import org.NooLab.structures.nlp.PoStagged;
import org.NooLab.structures.nlp.WordSet;
import org.NooLab.structures.nlp.WordSets;
import org.NooLab.utilities.logging.PrintLog;
import org.NooLab.utilities.strings.ArrUtilities;
import org.NooLab.utilities.strings.StringsUtil;




/**
 * Derives sets of candidate search terms ({@link WordSets}) from the NLP analysis
 * of a single sentence. Several differently-filtered views of the sentence
 * (core sentence, main phrase, original text, simple sequential PoS patterns)
 * are combined, de-duplicated and published back into the {@link NLPresultSet}.
 *
 * <p>PoS tags used throughout (NN, NE, ADJA, VAFIN, ART, ...) follow the STTS
 * German tagset — presumably supplied by the OpenNLP PoS tagger; TODO confirm.
 */
public class SearchTermsPreparation {

	
	private LinguisticProcessor lingproc;
	
	// analysis results for the current sentence; all phrase / PoS data are read from here
	NLPresultSet resultSet;
	// accumulated word sets: built by prepareSearchTerms(), refined in getWordSets()
	WordSets ws;
	
	
	LanguageProcessorTools lptools = new LanguageProcessorTools();
	
	StringsUtil strgutil = new StringsUtil();
	PrintLog out = new PrintLog(2,false);
	
	// ========================================================================
	/**
	 * @param lingproc  owning linguistic processor (retained, currently not read here)
	 * @param resultset NLP results of the sentence to derive search terms from
	 */
	public SearchTermsPreparation(LinguisticProcessor lingproc, NLPresultSet resultset) {
		this.lingproc = lingproc;
		
		resultSet = resultset ;
		ws = new WordSets(); 
	}
	// ========================================================================
	
	
	/**
	 * Creates, filters and publishes the word sets for the current sentence.
	 *
	 * @param redundancies if &gt;0, forwarded to {@code WordSets.removeMatchingSet()};
	 *                     per the inline note, 1 = remove any set completely
	 *                     contained in another set
	 * @return the refined collection of word sets (also published into resultSet)
	 */
	public WordSets getWordSets( int redundancies) {
		
		ws = prepareSearchTerms();
		
		// drop small sets that consist of modifiers / function words only
		ws.disposeSmallSetsOf(new String[]{"ADJ","KO*","VA*","PD","ART","APPR","ADV"}) ;
		
		ws.collateSmallSets(1,5) ;
		
		ws.makeSetEntriesUnique() ;
		
		ws.removeMatchingSet(9) ; // 9 = empty, and any complete match... sets could be equal
		
		if (redundancies>0){
			// 1= if one set is completely contained in any other... remove it
			ws.removeMatchingSet(redundancies) ;
		}
		
		ws.publishToResults( resultSet, 1); // 1=labels only
		
		return ws;
	}
	

	/**
	 * Builds the raw collection of word sets from several filtered views of the
	 * sentence and from small sequential PoS patterns (NN NN, ADJ* NN, VA* ADJ*).
	 * Sets are only collected when at least one noun is present in the sentence.
	 *
	 * @return the (field) {@code ws} collection, possibly empty
	 */
	private WordSets prepareSearchTerms() {
		
		ArrayList<PoStagged> taggedPosItems ;
		ArrayList<String> verbs ;
		
		// we create set of words for this sentence, which could be useful for a search
		// the search application layer could filter these sets further 
		
		/*
		 * important signatures
		 * 
		 * - verbs without VAFIN
		 * - mainPhrase : without -> *ART, ADV, VAFIN
		 * - subsequent noun phrases NN + NN , NN [a] NN    
		 * - coreSentence: only NN 
		 * - mainPhrase : NN + VV* 
		 * 
		 * - high weight: questions with nouns
		 * - creating singular forms
		 * 
		 */
		// these items are in the same order as in the sentence
		taggedPosItems = resultSet.getTaggedPoS() ;
		
		// any of them VAFIN ?
		// TODO(review): 'verbs' is fetched but never used below — intended for the
		// "verbs without VAFIN" signature above? confirm before removing the call
		verbs = resultSet.getCollectedVerbPhrases();
		
		// now, checking sentence snippets against PoSTagged list, determining types and filtering
		WordSet msSet1neg = new WordSet();
		/*
		 * PDS "das" like in "genau das ist"
		 */															// this should be put into a config file
		// direction: 1 = matches will remain, -1 = matches will be removed
		WordSet csSet1 = filteringPhrases( resultSet.getCoreSentence(), new String[]{"V*","*PER","PDS","PTK","*ART","APP*","ADV","PRO*","VA*","KO*","PR*","PP","NE","ADV*","PIS","PW*"}, -1 ) ;
		WordSet csSet2 = filteringPhrases( resultSet.getCoreSentence(), new String[]{"*ART","APP*","ADV","PRO*","VA*","KO*","PR*","PP"}, -1 ) ;
		WordSet csSet3 = filteringPhrases( resultSet.getCoreSentence(), new String[]{"NN"}, 1 ) ; csSet3.setRemovalAllowed(false) ;
		WordSet csSet4 = filteringPhrases( resultSet.getOriginalText(), new String[]{"NN","NE"}, 1 ) ;
		WordSet msSet1 = filteringPhrases( resultSet.getMainPhrase(), new String[]{"NN","NE","VV*"}, 1 ) ;
		
		if (negationIsPresent()){ // detecting not, none, neither-nor, rare, never, only
			// TODO: we need our own catalog for the languages, it s not qualified by PoSTagger
			msSet1neg = addNegationToSet(msSet1)  ;
			msSet1neg.setRemovalAllowed(false); // on match, do not remove from overall collection of sets
		}
		
		// small sequential patterns
		PrintLog.Print(1,resultSet.getAllPoSasStr());
		
		WordSets sqNouns = detectingSimplePattern( resultSet.getTaggedPoS() , new String[]{"NN","NE"},1) ; // 1 = max distance (by intermittent elements) 
		
		WordSets specsNouns = detectingSimplePattern( resultSet.getTaggedPoS() , new String[]{"ADJ*","NN"},1) ;
		
		WordSets specsVerbs = detectingSimplePattern( resultSet.getTaggedPoS() , new String[]{"VA*","ADJ*"},1) ;
		
		// is there any noun at all in the sentence? (search terms without nouns are useless here)
		int anyNounPos = lptools.findIndexOfNoun(resultSet.getTaggedPoS(), 0, 99, "", "");
		
		
		ArrayList<String> labels ;
		
		// debug output of all candidate sets (level 4)
		PrintLog.Print(4, "found word sets");
		labels = csSet1.getLabels();      out.print(4, "cs1 : " + ArrUtilities.arr2Text(labels, "; "));
		labels = csSet2.getLabels();      out.print(4, "cs2 : " + ArrUtilities.arr2Text(labels, "; "));
		labels = csSet3.getLabels();      out.print(4, "cs3 : " + ArrUtilities.arr2Text(labels, "; "));
		labels = csSet4.getLabels();      out.print(4, "cs4 : " + ArrUtilities.arr2Text(labels, "; "));
		
		labels = msSet1.getLabels();      out.print(4, "ms1 : " + ArrUtilities.arr2Text(labels, "; "));
		labels = msSet1neg.getLabels();   out.print(4, "msN : " + ArrUtilities.arr2Text(labels, "; "));
		
		labels = sqNouns.getLabels();     out.print(4, "sqN : " + ArrUtilities.arr2Text(labels, "; "));
		labels = specsNouns.getLabels();  out.print(4, "adjN: " + ArrUtilities.arr2Text(labels, "; "));
		
		labels = specsVerbs.getLabels();  out.print(4, "vaad: " + ArrUtilities.arr2Text(labels, "; "));
		
		
		// combining wordSets — only if the sentence contains at least one noun
		// NOTE(review): msSet1neg is computed and logged above, but never added here — confirm intent
		if (anyNounPos >= 0){
		
			if (csSet1.size()>0){
				ws.add(csSet1);
			}
			if (csSet2.size()>0){
				ws.add(csSet2);
			}
			if (csSet3.size()>0){
				ws.add(csSet3);
			}
			if (csSet4.size()>0){
				ws.add(csSet4);
			}
			if (msSet1.size()>0){
				ws.add(msSet1);
			}
			if (sqNouns.size()>0){
				ws.addAll(sqNouns.getItems());
			}
			if (specsNouns.size()>0){
				ws.addAll(specsNouns.getItems());
			}
			if (specsVerbs.size()>0){
				ws.addAll(specsVerbs.getItems());
			}
		
		}
		
		return ws;
	}

	
	/**
	 * Stub: supposed to enrich a word set with negation markers.
	 * Currently returns the input unchanged.
	 *
	 * @param inWSet set derived from the main phrase
	 * @return currently {@code inWSet} itself, unmodified
	 */
	private WordSet addNegationToSet(WordSet inWSet) {
		// TODO: not implemented yet — see negationIsPresent(), which is also a stub
		return inWSet;
	}
	
	/**
	 * Splits a phrase into words, looks up each word's PoS tag in the sentence's
	 * tagged-PoS list and keeps or drops the word depending on {@code direction}.
	 *
	 * @param phrase   raw phrase text; commas, " - " and double blanks are normalized away
	 * @param posItems PoS tag patterns, '*' wildcard supported (via matchSimpleWildcard)
	 * @param direction 1 = words whose tag matches any pattern remain;
	 *                  -1 = matching words are removed (non-matching ones remain)
	 * @return the resulting word set; empty for a null/empty phrase
	 */
	private WordSet filteringPhrases( String phrase, String[] posItems, int direction) {
		//
		WordSet ws = new WordSet();
		NlpWord nlpw;
		String str, ptstr, ptsynt ,wordstr, posTag ;
		String[] words;
		int sp;
		// splitting
			
		if ((phrase==null) || (phrase.length()==0)){
			return ws;
		}
		str = phrase.trim();
		
		str = strgutil.replaceAll(str,", ", " ");
		str = strgutil.replaceAll(str," - ", " ");
		str = strgutil.replaceAll(str,"  ", " ");
		
		words = str.split(" ");
		
		for (int i=0;i<words.length;i++){
			wordstr = words[i];
			sp = -1;
			// get its PoSTag: first occurrence of the word in the tagged sentence
			// NOTE(review): duplicated words always resolve to the position of the
			// first occurrence — confirm that this is acceptable
			posTag = "";
			
			for (int k=0;k<resultSet.getTaggedPoS().size();k++){
				ptstr = resultSet.getTaggedPoS().get(k).textual ;
				ptsynt = resultSet.getTaggedPoS().get(k).syntacticalTag ;
				if (ptstr.contentEquals(wordstr)){
					posTag = ptsynt;
					sp = k;
					break;
				}
			}
			
			if (posTag.length()==0){
				// NOTE(review): a single unknown word aborts the whole phrase and returns
				// the partially filled set — 'continue' (skip the word) may be intended; confirm
				return ws;
			}
			
			// match against list of posItems, respecting * wildcard
			boolean done=false;
			boolean recognized =false;
			boolean invertedMatch = false;
			
			
			for(int p=0;p<posItems.length;p++){
				
				String pip = posItems[p];
				
				recognized = strgutil.matchSimpleWildcard( pip, posTag);
				
				if (recognized){
					done=true;
				}
				// for removal mode (-1) a hit is inverted: matched words must NOT be kept
				if ((direction<0) && (done)){
					recognized = !recognized; invertedMatch=true;
				}
				if (done){
					break;
				}
			} // p->

			// keep the word if: (a) keep-mode and tag matched, or
			// (b) remove-mode and no pattern matched at all
			boolean hb = (recognized && ((direction>0)) );
			if (hb==false){
				hb = (recognized==false) && (direction<0) && (invertedMatch==false);
			}
			if (hb==false){
				// NOTE(review): unreachable — when invertedMatch is true, 'recognized' has just
				// been flipped to false above, so this clause can never fire; kept as-is
				hb = (recognized==true) && (direction<0) && (invertedMatch==true);
			}
			
			if (hb){
				nlpw = new NlpWord() ;
				nlpw.setLabel(wordstr);
				nlpw.setPosTag(posTag);
				NlpPosition nlpp = new NlpPosition();
				nlpp.setContextToSentence();
				nlpp.setPosition(sp);
				nlpw.getNlpPositions().getItems().add( nlpp );
				
				ws.getWords().add(nlpw);
			}
			
		}// i-> 
			
		
		return ws;
	}


	/**
	 * Stub: supposed to detect negations (not, none, neither-nor, rare, never, only).
	 *
	 * @return always {@code false} for now
	 */
	private boolean negationIsPresent() {
		// TODO: needs a language-specific catalog; negation is not qualified by the PoS tagger
		return false;
	}
	
	//actually, we should transcribe it into a string of primitives, using a table, and then using regex
	/**
	 * Detects ordered pairs: an occurrence of {@code posItems[0]} followed by
	 * {@code posItems[1]} with at most {@code distance} intermittent tokens.
	 * Overlapping consecutive pairs sharing their boundary word are merged into
	 * the previously created set instead of opening a new one.
	 *
	 * @param taggedPoS tokens of the sentence, in sentence order
	 * @param posItems  exactly two PoS tag patterns (wildcards per lptools)
	 * @param distance  max number of tokens allowed between the two pattern hits
	 * @return a collection of (usually two-word) sets, flagged as consecutive words
	 */
	private WordSets detectingSimplePattern(ArrayList<PoStagged> taggedPoS, String[] posItems, int distance) {
		
		WordSets ws = new WordSets();
		int lastPos=0;
		WordSet wset;
		String currLastWordStr="" ;
		WordSet lastSet=null ;
		NlpWord lastWord =null;
		
		int[] posits;
		
		// all positions of the first pattern element
		posits = lptools.allIndicesofParticle(taggedPoS, posItems[0]) ;
		
		for (int i=0;i<posits.length;i++){
			int p1 = posits[i];
			int p2 = lptools.nextParticle(taggedPoS, posItems[1], p1);
			
			// NOTE(review): p1>0 excludes a sentence-initial hit (index 0) — confirm
			// whether this is deliberate or an off-by-one (p1>=0)
			if ((p1>0) && (p2>p1) && (p2-p1<=1+distance)){
				
				wset = new WordSet();
				addWordtoWordset(wset,taggedPoS,p1);
				addWordtoWordset(wset,taggedPoS,p2);
				
				// does this pair directly continue the previously accepted one?
				if ((p1-lastPos<=distance+1) && (ws.size()>0)){
					lastSet = ws.getItems().get(ws.size() - 1);
					lastWord = lastSet.getWords().get(lastSet.size() - 1);
					currLastWordStr = taggedPoS.get(p2).getTextual();
				}else{
					lastWord=null;
				}
				if ((lastWord!=null) && (lastWord.getLabel().contentEquals(currLastWordStr) )){
					// if the second = last word of the current pair is the same as the last in the previous,
					// we do not open a new set, instead, we extend the last set by that actual last word
					if (lastSet!=null){
						lastSet.getWords().add(1, wset.getWord(0));
					}
				}else{
					if (ws.contains(wset)==false){
						wset.setConsecutiveWords(true);
						ws.add(wset);
					}
				}
				lastPos = p2;
			}
		}
		
		return ws;
	}

	/**
	 * Wraps the token at position {@code p1} into an {@link NlpWord} (label, PoS tag,
	 * sentence-scoped position) and appends it to the given word set.
	 */
	private void addWordtoWordset(  WordSet ws, ArrayList<PoStagged> taggedPoS,
									int p1) {
		// 
		NlpWord w = new NlpWord();
		w.setLabel( taggedPoS.get(p1).textual );
		w.setPosTag(taggedPoS.get(p1).syntacticalTag );
		
		NlpPosition p = new NlpPosition();
		p.setPosition(p1) ;
		p.setContextToSentence() ;
		w.getNlpPositions().getItems().add(p );
		ws.getWords().add(w) ;

	}
	
}
