package analyzer;

import java.util.HashMap;
import java.util.LinkedList;
import java.util.StringTokenizer;

import constants.Constants;

import parser.EntityElement;
import parser.InputEntity;

import lex.Lexicon;
import wordnet.WordNet;

public class Analyzer {

	/**
	 * OLD VERSION - DO NOT USE!!
	 */
	/*
	// our input text entity
	private InputEntity input;
	
	// the current phrase we are analyzing - each word has one array slot
	private String[] tokens;
	
	// the so far retrieved information from all previous phrases in hash maps
	private HashMap<String,Boolean> names;
	private HashMap<String,Boolean> locations;
	private HashMap<String,Boolean> organizations;
	
	// counters which signalize if next tokens have been detected as certain information
	// (to avoid double checking)
	private int nextWasName = 0;
	private int nextWasLocation = 0;
	private int nextWasOrganization = 0;
	
	// flags that signalize that a pattern word occurred that definitely assigns a token family to a type
	private boolean isName;
	private boolean isLocation;
	private boolean isOrganization;
	
	// our lexicon
	private Lexicon lexicon; 
	
	// our wordnet database
	private WordNet wordNet;
		
	public Analyzer(String file) {
		
		// open input file
		input = new InputEntity();
		input.open(file);
		
		// initialize lists
		names = new HashMap<String,Boolean>();
		locations = new HashMap<String,Boolean>();
		organizations = new HashMap<String,Boolean>();
		
		// initialize word net
		wordNet = new WordNet();
		
		// initialize lexicon
		lexicon = new Lexicon();
		lexicon.init();
	}
		
	private void setPhrase(String phrase) {
		StringTokenizer tokenizer = new StringTokenizer(phrase);
		tokens = new String[tokenizer.countTokens()];
		for(int i=0; tokenizer.hasMoreTokens(); i++) {
			tokens[i] = tokenizer.nextToken();
		}
	} 
	
	public boolean hasNextPhrase() {
		return input.hasPhrase();
	}
	
	public LinkedList<EntityElement> analyzeNextPhrase() {
		LinkedList<EntityElement> list = new LinkedList<EntityElement>();
		
		// set next phrase
		String nextPhrase = input.nextPhrase();
		setPhrase(nextPhrase);
		System.out.println("Start analyzing phrase: \n"+ nextPhrase+"\n ---------------------------");
		
		list = findEntities();
		// TODO
		
		System.out.println("---------- Finished analyzing phrase --------");
		return list;
	}
	
	private LinkedList<EntityElement> findEntities() {
		
		LinkedList<EntityElement> list = new LinkedList<EntityElement>();
		// check every token
		for(int i=0; i<tokens.length; i++) {
			// reset flags
			isLocation = false;
			isName = false;
			isOrganization = false;
			
			if(nextWasName > 0) {
				// this token has already been analyzed as a name -> skip
				nextWasName--;
				continue;
			}
			if(nextWasLocation > 0) {
				// this token has already been analyzed as a location -> skip
				nextWasLocation--;
				continue;
			}
			if(nextWasOrganization > 0) {
				// this token has already been analyzed as an organization -> skip
				nextWasOrganization--;
				continue;
			}
			// check this token (and maybe also next tokens) for name and store in list
			String na = checkTokenForName(list, i, "");
			if(na != "" && !isLocation && !isOrganization) {
				// check how many tokens are contained in this name so we can skip them in next loops
				nextWasName = na.split(" ").length-1;
				addToList(list, na, Constants.STYLE.NAME);
				continue;
			}
			// check this token (and maybe also next tokens) for location and store in list
			String lo = checkTokenForLocation(list, i, "");
			if(lo != "" && !isName && !isOrganization) {
				// check how many tokens are contained in this name so we can skip them in next loops
				nextWasLocation = lo.split(" ").length-1;
				addToList(list, lo, Constants.STYLE.LOCATION);
				continue;
			}
			// check this token (and maybe also next tokens) for organization and store in list
			checkTokenForOrganization(list, i, "");
			
			// NADA found...
			if(Constants.DEBUG) {
				//System.out.println("'"+tokens[i]+"'" + " has no valuable information");
			}
		}
		
		return list;
	}
	
	private String checkTokenForName(LinkedList<EntityElement> list, int i, String prev) {
		String elem = tokens[i].replaceAll("[^\\p{L}\\p{N}\\.]", ""); 
		if(i >= tokens.length-1) {
			elem = elem.replaceAll("[\\.\\?\\!]", "");
			// check if sth of the token is still left after removal
			if(elem.length()<=0) {
				return "";
			}
		}
		String total = prev+" "+elem;

		// check if token is capital letter and a possible candidate
		if(!Character.isUpperCase(elem.charAt(0))) {
			// no possible candidate -> skip
			if(Constants.DEBUG) {
				System.out.println("'"+elem.trim()+"'"+" is no upper case -> skip");
			}
			return "";
		}
		
		// Did we already retrieve this value (token or token+prevTokens)?
		if(names.get(elem.trim()) != null || names.get(total.trim()) != null) {
			if(Constants.DEBUG) {
				System.out.println("'"+elem.trim()+"'"+" has already been retrieved -> use again");
			}
			// check if next token is part of the name
			if(i<tokens.length-1) {
				elem += " "+checkTokenForName(list, i+1, total.trim());
			}
			return elem;
		}
		// Next check if name is in lexicon (token or token+prevTokens)
		if(lexicon.isName(elem.trim()) || lexicon.isName(total.trim())) {
			if(Constants.DEBUG) {
				System.out.println("'"+elem.trim()+"'"+" has been found in name lexicon");
			}
			// check if next token is part of the name like 'Smith' from 'John Smith'
			if(i<tokens.length-1) {
				elem+= " "+checkTokenForName(list, i+1, total.trim());
			}
			names.put(total.trim(),true);
			return elem;
		}
		
		// could be sth. like 'J. Smith' --> check next just in case
		if(i<tokens.length-1) {
			String next = checkTokenForName(list, i+1, total.trim());
			if(next != "") {
				return elem+=" "+next;
			}
		}
		
		// ----- TODO: Grammatical methods here - or maybe better in extra method!!! ------
		// just check for trigger words
		// ---------------------------------------------
		
		if(prev != "") {
			if(Constants.DEBUG) {
				System.out.println("'"+elem.trim()+"'"+" is capital and had previous names -> must mean sth.");
			}
			return elem;
		}
		
		if(Constants.DEBUG) {
			System.out.println("'"+elem.trim()+"'"+" no possible names candidate -> skip.");
		}
		return "";
	}
	
	private void addToList(LinkedList<EntityElement> list, String element, Constants.STYLE type) {
		// initialize our matcher
		input.initMatcher(element);
		int index = input.getNextStartIndex();
		while(index >= 0) {
			list.add(new EntityElement(index, element, type));
			index = input.getNextStartIndex();
		}
	}
	
	// TODO
	public String checkTokenForOrganization(LinkedList<EntityElement> list, int i, String prev) {
		return "";
	}
	
	public String checkTokenForLocation(LinkedList<EntityElement> list, int i, String prev) {
		String elem = tokens[i].replaceAll("[^\\p{L}\\p{N}]", ""); 
		// if phrase ending -> replace punctation
		if(i >= tokens.length-1) {
			elem = elem.replaceAll("[\\.\\?\\!]", "");
			// check if sth of the token is still left after removal
			if(elem.length()<=0) {
				return "";
			}
		}
		String total = prev+" "+elem;

		// check if token is capital letter and a possible candidate
		if(!Character.isUpperCase(elem.charAt(0))) {
			// no possible candidate -> skip
			if(Constants.DEBUG) {
				System.out.println("'"+elem.trim()+"'"+" is no upper case -> skip");
			}
			return "";
		}
		
		// Did we already retrieve this value (token or token+prevTokens)?
		if(locations.get(elem.trim()) != null || locations.get(total.trim()) != null) {
			if(Constants.DEBUG) {
				System.out.println("'"+elem.trim()+"'"+" has already been retrieved -> use again");
			}
			// check if next token is part of the name
			if(i<tokens.length-1) {
				elem += " "+checkTokenForLocation(list, i+1, total.trim());
			}
			return elem;
		}
		// Next check if name is in lexicon (token or token+prevTokens)
		if(lexicon.isLocation(elem.trim()) || lexicon.isLocation(total.trim())) {
			if(Constants.DEBUG) {
				System.out.println("'"+elem.trim()+"'"+" has been found in location lexicon");
			}
			// check if next token is part of the name
			if(i<tokens.length-1) {
				elem+= " "+checkTokenForLocation(list, i+1, total.trim());
			}
			locations.put(total.trim(),true);
			return elem;
		}
		
		// could be sth. like J. Smith --> check next just in case
		if(i<tokens.length-1) {
			String next = checkTokenForLocation(list, i+1, total.trim());
			if(next != "") {
				return elem+=" "+next;
			}
		}
		
		if(prev != "") {
			if(Constants.DEBUG) {
				System.out.println("'"+elem.trim()+"'"+" is capital and had previous locations -> must mean sth.");
			}
			return elem;
		}
		
		if(Constants.DEBUG) {
			System.out.println("'"+elem.trim()+"'"+" no possible location candidate -> skip.");
		}
		return "";
	}
	*/
}
