package analyzer;

import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.ListIterator;
import java.util.StringTokenizer;
import java.util.regex.Pattern;

import constants.Constants;
import lex.Lexicon;
import parser.EntityElement;
import parser.InputEntity;
import wordnet.WordNet;

public class AnalyzerBeta {

	// our input text entity
	private InputEntity input;
	
	// the current phrase we are analyzing - each word has one array slot
	private LinkedList<SpecialToken> specialTokens;
	
	// the so far retrieved information from all previous phrases in hash maps 
	// it is more likely that previously used names etc, will occur again
	private HashMap<String,Boolean> names;
	private HashMap<String,Boolean> locations;
	private HashMap<String,Boolean> organizations;
	
	// our lexicon
	private Lexicon lexicon; 
	
	// our wordnet database
	private WordNet wordNet;
		
	public AnalyzerBeta(String file) {
		
		// open input file
		input = new InputEntity();
		input.open(file);
		
		// initialize maps
		names = new HashMap<String,Boolean>();
		locations = new HashMap<String,Boolean>();
		organizations = new HashMap<String,Boolean>();
		
		// initialize word net
		wordNet = new WordNet();
		
		// initialize lexicon
		lexicon = new Lexicon(wordNet);
		lexicon.init();
	}
	
	public boolean hasNextPhrase() {
		return input.hasPhrase();
	}
	
	private void structurePhrase(String phrase) {
		specialTokens = new LinkedList<SpecialToken>();
		// traverse phrase into a token array
		StringTokenizer tokenizer = new StringTokenizer(phrase);
		String[] tokens = new String[tokenizer.countTokens()];
		for(int i=0; tokenizer.hasMoreTokens(); i++) {
			tokens[i] = tokenizer.nextToken();
		}
		
		// find main tokens and their corresponding indicators
		for(int i=0; i<tokens.length; i++) {
			// maybe TODO: separate main tokens when ',' or ';' between
			String curModTok = tokens[i].replaceAll("[^\\p{L}\\p{N}\\.]", "");
			char first = curModTok.charAt(0);
			LinkedList<TokenElement> main = new LinkedList<TokenElement>();
			int start = i;
			boolean foundMainToken = false;
			while(Character.isUpperCase(first)) {
				// we found an upper case -> (maybe) part of a main token
				foundMainToken = true;
				// check if first word and if word is stop word or most likely unimportant 
				// -> set foundMainToken false to skip this token!
				if(i == 0) {
					if(lexicon.isStopWord(curModTok)) {
						// stop word at beginning of sentence -> not important for us
						foundMainToken = false;
						break;
					}
				}
				main.add(new TokenElement(tokens[i], i, tokens.length));
				// find delimiters like ":,;" in which two different main tokens could occur coherently
				if(tokens[i].contains(",") || tokens[i].contains(":") || tokens[i].contains(";")) {
					break;
				}
				
				// set next element
				i++;
				// check if we reached end of token array
				if(i>=tokens.length) {
					break;
				}
				// get next beginning character
				curModTok = tokens[i].replaceAll("[^\\p{L}\\p{N}\\.]", "");
				first = curModTok.charAt(0);
			} 
			if(foundMainToken) {
				LinkedList<TokenElement> previous = new LinkedList<TokenElement>();
				// get previous indicator tokens
				for(int j=start-1; j>=0; j--) {
					curModTok = tokens[j].replaceAll("[^\\p{L}\\p{N}\\.]", "");
					// if it is lower case -> add it - could be an indicator
					if(Character.isLowerCase(curModTok.charAt(0))) {
						previous.add(new TokenElement(tokens[j],j, tokens.length));
					} 
					else {
						// upper case -> other main token
						break;
					}
				}
				LinkedList<TokenElement> next = new LinkedList<TokenElement>();
				// get next indicator tokens
				for(int j=i; j<tokens.length; j++) {
					curModTok = tokens[j].replaceAll("[^\\p{L}\\p{N}\\.]", "");
					// if it is lower case -> add it - could be an indicator
					if(Character.isLowerCase(curModTok.charAt(0))) {
						next.add(new TokenElement(tokens[j],j, tokens.length));
					} 
					else {
						// upper case -> other main token
						break;
					}
				}
				// now create the special token element
				TokenElement[] mainArray = new TokenElement[main.size()];
				main.toArray(mainArray);
				TokenElement[] prevArray = new TokenElement[previous.size()];
				previous.toArray(prevArray);
				TokenElement[] nextArray = new TokenElement[next.size()];
				next.toArray(nextArray);
				specialTokens.add(new SpecialToken(mainArray, prevArray, nextArray));
			}
		}
	} 
	
	public LinkedList<EntityElement> analyzeNextPhrase() {
		// set next phrase
		String nextPhrase = input.nextPhrase();
		System.out.println("Start analyzing phrase: \n"+ nextPhrase+"\n ---------------------------");
		structurePhrase(nextPhrase);
		// return analyzation result
		return findEntities();
	}
	
	private LinkedList<EntityElement> findEntities() {
		LinkedList<EntityElement> list = new LinkedList<EntityElement>();
		// go through all special tokens we found for this phrase
		ListIterator<SpecialToken> ite = specialTokens.listIterator();
		while(ite.hasNext()) {
			// --> Check name attributes -> give a score 
			// in the end compare all scores and assign token
			SpecialToken current = ite.next();
			double badScore = getBadScore(current);
			double nameScore = getNameScore(current);
			double locationScore = getLocationScore(current);
			double organizationScore = getOrganizationScore(current);
			// initialize matcher for index retrieval
			input.initMatcher(current.getOriginWord());
			int sIndex = input.getNextStartIndex();
			// COMPARE SCORES NOW
			if(nameScore >= locationScore && nameScore >= organizationScore && nameScore >= badScore) {
				// name score is highest -> add name entity to output list
				while(sIndex >= 0) {
					list.add(new EntityElement(input.getDocID(), sIndex, current.getOriginWord(), Constants.STYLE.NAME));
					sIndex = input.getNextStartIndex();
				}
				// add string to our name map for later recognition
				names.put(current.getOriginWord(),true);
			}
			else if(locationScore > nameScore && locationScore >= organizationScore && locationScore >= badScore) {
				// location score is highest -> add location entity to output list
				while(sIndex >= 0) {
					list.add(new EntityElement(input.getDocID(), sIndex, current.getOriginWord(), Constants.STYLE.LOCATION));
					sIndex = input.getNextStartIndex();
				}
				// add string to our location map for later recognition
				locations.put(current.getOriginWord(),true);
			}
			else if(organizationScore > nameScore && organizationScore > locationScore && organizationScore >= badScore) {
				// location score is highest -> add location entity to output list
				while(sIndex >= 0) {
					list.add(new EntityElement(input.getDocID(), sIndex, current.getOriginWord(), Constants.STYLE.ORGANIZATION));
					sIndex = input.getNextStartIndex();
				}
				// add string to our organization map for later recognition
				organizations.put(current.getOriginWord(),true);
			}
			else {
				// bad score wins -> skip element
				continue;
			}
		}
		// some elements could occur double -> remove them
		removeClones(list);	
		return list;
	}
	
	private void removeClones(LinkedList<EntityElement> list) {
		LinkedList<EntityElement> toDelete = new LinkedList<EntityElement>();
		for(EntityElement element : list) {
			int curIndex = element.getStartIndex();
			// find clones
			for(int i=list.size()-1; i>list.indexOf(element); i--) {
				if(list.get(i).getStartIndex() == curIndex) {
					// found a clone --> add to remove collection
					toDelete.add(element);
				}
			}
		}
		list.removeAll(toDelete);
	}
	
	private double getBadScore(SpecialToken token) {
		double score = Constants.BAD_INIT_SCORE;
		if(token.getMain()[0].getIndex() == 0 && token.getMain().length == 1) {
			// only one element and it is at sentence beginning
			score += Constants.PHRASE_BEGINNING_FACTOR;
		}
		return score;
	}
	
	private double getNameScore(SpecialToken token) {
		double score = Constants.NAME_INIT_SCORE;
		// first check each word in lexicon -> each hit adds a score
		for(int i=0; i<token.getMain().length; i++) {
			if(lexicon.isName(token.getMain()[i].getWord()) != null) {
				// found a hit -> add to score
				score += Constants.NAME_LEXICON_FACTOR*
					(lexicon.getEntries()-lexicon.isName(token.getMain()[i].getWord()))/
					lexicon.getEntries();
			}
		}
		// check if whole name is in lexicon - only if more than one word in token...
		if(token.getMain().length>1) {
			if(lexicon.isName(token.getOriginWord()) != null) {
				// found match -> add to score
				score += Constants.NAME_LEXICON_FULL_HIT_FACTOR*
					(lexicon.getEntries()-lexicon.isName(token.getOriginWord()))/
					lexicon.getEntries();
			}
		}
		// check if name was found before (is in memory map)
		if(names.get(token.getOriginWord()) != null) {
			// was found before as a name -> add to score
			score += Constants.NAME_IN_MAP_FACTOR;
		}
		// -----------------------------------------------------
		// TODO: look for signal words (according to course slide2) in previous or next words
		// the variable 'token' has an array of previous important words you have to analyze -> token.getPrev()
		// the variable 'token' also has an array of following important words you have to analyze 
		// -> token.getNext()
		// if some of the words fullfill some grammatical requirements add a certain score to the 'score'
		// variable. Use the variable 'wordNet' for the analysis - it is a database API which contains 
		// word meanings etc. -> read on the internet WordNet API for Java to know how to use it. 
		// wordnet library is included and the variable is already initialized and ready to use. 
		// Please DO NOT HARDCODE NUMBERS!!!! Any fixed score you are adding for a certain requirement 
		// write it in the Constants class (like with the previous scores I already implemented). 
		// Do the same for organization and location scoring. 
		// please also provide a formatted organization lexicon (modify lexicon.java) as i did with the names and locations 
		// and use this lexicon similarly for the organization score.
		// -> everything else is implemented and 
		// tested. You are also provided a test file 'test' which is analyzed by our algorithm 
		// -> you can change it anytime for your testing purposes
		
		
		// -----------------------------------------------------
		return score;
	}
	
	private double getLocationScore(SpecialToken token) {
		double score = Constants.LOCATION_INIT_SCORE;
		// first check each word in lexicon -> each hit adds a score
		for(int i=0; i<token.getMain().length; i++) {
			if(lexicon.isLocation(token.getMain()[i].getWord())) {
				// found a hit -> add to score
				score += Constants.LOCATION_LEXICON_FACTOR;
			}
		}
		// check if whole location is in lexicon - only if more than one word in token...
		if(token.getMain().length>1) {
			if(lexicon.isLocation(token.getOriginWord())) {
				// found match -> add to score
				score += Constants.LOCATION_LEXICON_FULL_HIT_FACTOR;
			}
		}
		// check if name was found before (is in memory map)
		if(locations.get(token.getOriginWord()) != null) {
			// was found before as a name -> add to score
			score += Constants.LOCATION_IN_MAP_FACTOR;
		}
		
		// TODO: implement grammar analyzation as in names score...
		
		return score;
	}
	
	private double getOrganizationScore(SpecialToken token) {
		// TODO implement
		return -1;
	}
}
