package RE.impl.ex4;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import RE.impl.ex3.DependencyName;
import RE.rep.*;
import RE.rep.TokenWithNER.NamedEntityType;

/**
 * @author zirolny 30795560 charika 30534360
 */
public class Ex4 {
	private static List<String> allEntities = new ArrayList<String>();
	private static List<String> allWords = new ArrayList<String>();

	public static void main(String[] args) {
		List<SentenceWithDependenciesAndNER> devSentences = getCorpusSentences(args[0]);
		List<SentenceWithDependenciesAndNER> testSentences = getCorpusSentences(args[2]);

		Set<String> entities = extractEntities(devSentences);
		entities.addAll(extractEntities(testSentences));
		allEntities.addAll(entities);
		Collections.sort(allEntities);

		Set<String> words = extractWords(devSentences);
		words.addAll(extractWords(testSentences));
		allWords.addAll(words);
		Collections.sort(allWords);

		createOutputDirectory(args[4]);

		createArffFile(devSentences, args[1], args[4] + "/train.arff");
		createArffFile(testSentences, args[3], args[4] + "/test.arff");

		// checkClassifierResults("predicted.arff");
		// analyseError("predicted4.arff");
	}

	/**
	 * Extract the sentences which appear in filename (one of the corpus files)
	 * 
	 * @param filename
	 *            The corpus filename.
	 * @return The sentences.
	 */
	private static List<SentenceWithDependenciesAndNER> getCorpusSentences(String filename) {
		String text;
		try {
			// Get a reader to the corpus file
			BufferedReader reader = new BufferedReader(new FileReader(filename));
			// Read all the lines of the file
			String line;
			StringBuilder textBuffer = new StringBuilder();
			while ((line = reader.readLine()) != null)
				textBuffer.append(line);
			// Close the reader
			reader.close();
			text = textBuffer.toString();
		} catch (Exception e) {
			e.printStackTrace();
			return null;
		}
		// Remove the spaces between the tags
		text = trimSpaces(text);
		// Return the sentences from the text
		return extractSentences(text);
	}

	/**
	 * Removes the spaces between the tags.
	 * 
	 * @param text
	 *            The text to remove from.
	 * @return The text without the spaces.
	 */
	private static String trimSpaces(String text) {
		// Create the string buffer
		StringBuilder retStr = new StringBuilder();
		// Check all the chars in the text
		for (int i = 0; i < text.length(); i++) {
			// Get the current char
			char ch = text.charAt(i);
			// Check if the char is space or tab
			if ((ch == ' ') || (ch == '\t')) {
				boolean startingTag = false;
				// Check all the previous chars
				for (int j = i - 1; j >= 0; j--) {
					// Get the current char
					char ch2 = text.charAt(j);
					// Check if it is a starting tag
					if (ch2 == '<') {
						startingTag = true;
						break;
						// Check if it is an ending char
					} else if (ch2 == '>') {
						startingTag = false;
						break;
					}
				}
				// Check if the char is inside a tag
				if (!startingTag)
					continue;
			}
			// Add the char to the new string
			retStr.append(ch);
		}
		// Return the new string
		return retStr.toString();
	}

	/**
	 * Gets the sentences of the given text.
	 * 
	 * @param text
	 *            The text to get the sentences from.
	 * @return A list of sentences in the text.
	 */
	private static List<SentenceWithDependenciesAndNER> extractSentences(String text) {
		// Create the list of sentences
		List<SentenceWithDependenciesAndNER> sentencesList = new ArrayList<SentenceWithDependenciesAndNER>();
		// Create the pattern of the whole file
		String filePat = "<\\?xml version=\"1.0\" encoding=\"UTF-8\"\\?>" + "<\\?xml-stylesheet href=\"CoreNLP-to-HTML.xsl\" type=\"text/xsl\"\\?>" + "<root>" + "<document>" + "<sentences>" + "(.*)"
				+ "</sentences>" + "</document>" + "</root>";
		// Compile and evaluate the pattern
		Pattern filePattern = Pattern.compile(filePat);
		Matcher fileMatcher = filePattern.matcher(text);
		fileMatcher.matches();
		// Get the sentences part
		String sentences = fileMatcher.group(1);

		// Create the pattern of a single sentence
		String sentencePat = "<sentence id=\"([0-9]*)\">" + "<tokens>" + "((" + "<token id=\"[0-9]*\">" + "<word>[^<]*</word>" + "<lemma>[^<]*</lemma>"
				+ "<CharacterOffsetBegin>[0-9]*</CharacterOffsetBegin>" + "<CharacterOffsetEnd>[0-9]*</CharacterOffsetEnd>" + "<POS>[^<]*</POS>" + "<NER>[^<]*</NER>"
				+ "(<NormalizedNER>[^<]*</NormalizedNER>)?" + "((<Timex tid=\"[^<]*\" type=\"[^<]*\"/>)|" + "(<Timex tid=\"[^<]*\" type=\"[^<]*\">[^<]*</Timex>))?" + "</token>" + ")*)" + "</tokens>"
				+ "<parse>[^<]*</parse>" + "<basic-dependencies>" + "((" + "<dep type=\"[^<]*\">" + "<governor idx=\"[0-9]*\">[^<]*</governor>" + "<dependent idx=\"[0-9]*\">[^<]*</dependent>"
				+ "</dep>" + ")*)" + "</basic-dependencies>" + "<collapsed-dependencies>" + "((" + "<dep type=\"[^<]*\">" + "<governor idx=\"[0-9]*\">[^<]*</governor>"
				+ "<dependent idx=\"[0-9]*\">[^<]*</dependent>" + "</dep>" + ")*)" + "</collapsed-dependencies>" + "<collapsed-ccprocessed-dependencies>" + "((" + "<dep type=\"[^<]*\">"
				+ "<governor idx=\"[0-9]*\">[^<]*</governor>" + "<dependent idx=\"[0-9]*\">[^<]*</dependent>" + "</dep>" + ")*)" + "</collapsed-ccprocessed-dependencies>" + "</sentence>";
		// Compile the pattern
		Pattern sentencePattern = Pattern.compile(sentencePat);
		Matcher sentenceMatcher = sentencePattern.matcher(sentences);

		// Continue while there are more sentences
		while (sentenceMatcher.find()) {
			// Create the pattern of a single token
			String tokenPat = "<token id=\"([0-9]*)\">" + "<word>([^<]*)</word>" + "<lemma>([^<]*)</lemma>" + "<CharacterOffsetBegin>[0-9]*</CharacterOffsetBegin>"
					+ "<CharacterOffsetEnd>[0-9]*</CharacterOffsetEnd>" + "<POS>([^<]*)</POS>" + "<NER>([^<]*)</NER>" + "(<NormalizedNER>[^<]*</NormalizedNER>)?"
					+ "((<Timex tid=\"[^<]*\" type=\"[^<]*\"/>)?|" + "(<Timex tid=\"[^<]*\" type=\"[^<]*\">[^<]*</Timex>)?)" + "</token>";
			// Compile the pattern
			Pattern tokenPattern = Pattern.compile(tokenPat);
			Matcher tokenMatcher = tokenPattern.matcher(sentenceMatcher.group(2));
			String sentenceID = "";
			StringBuilder xmlFormattedSentence = new StringBuilder();
			ArrayList<TokenWithNER> tokens = new ArrayList<TokenWithNER>();
			// Get all the tokens
			while (tokenMatcher.find()) {
				// Get the token word
				String word = tokenMatcher.group(2);
				if (word.matches("sent[0-9]+") && sentenceID.intern() == "")
					sentenceID = word;
				// Check if inside a sentence
				if (sentenceID.length() == 0)
					continue;
				// Get the token ID
				int tokenID = Integer.valueOf(tokenMatcher.group(1));
				// Get the token lemma
				String lemma = tokenMatcher.group(3);
				// Get the token POS
				String posStr = tokenMatcher.group(4);
				POS pos;
				try {
					pos = POS.valueOf(posStr);
				} catch (Exception e) {
					pos = POS.OTHER;
				}
				String NERStr = tokenMatcher.group(5);
				NamedEntityType ner;
				try {
					ner = NamedEntityType.valueOf(NERStr);
				} catch (Exception e) {
					ner = NamedEntityType.NOT_DEFINED;
				}
				// Create the token
				TokenWithNER token = new TokenWithNER(tokenID, sentenceID, word, lemma, pos, ner);
				// Add the token to the tokens list
				tokens.add(token);
				// Add the token to the XML string
				xmlFormattedSentence.append(tokenMatcher.group(0));
			}

			// String parsePat="<parse>([^<])*</parse>";

			String depPat = "<dep type=\"([^<]*)\">" + "<governor idx=\"([0-9]*)\">[^<]*</governor>" + "<dependent idx=\"([0-9]*)\">[^<]*</dependent>" + "</dep>";

			// Compile the pattern
			Pattern depPattern = Pattern.compile(depPat);
			Matcher basicDepMatcher = depPattern.matcher(sentenceMatcher.group(8));

			List<SyntacticDependency> dependencies = new ArrayList<SyntacticDependency>();
			// Get all the basic dependencies
			while (basicDepMatcher.find()) {
				String depType = basicDepMatcher.group(1);
				Integer governorIdx = Integer.parseInt(basicDepMatcher.group(2));
				Integer dependentIdx = Integer.parseInt(basicDepMatcher.group(3));
				TokenWithNER governorTok = null, dependentTok = null;
				for (TokenWithNER tok : tokens) {
					if (tok.getTokenId() == governorIdx)
						governorTok = tok;
					if (tok.getTokenId() == dependentIdx)
						dependentTok = tok;
				}
				DependencyName dep;
				try {
					dep = DependencyName.valueOf(depType);
				} catch (Exception e) {
					dep = null;
				}
				if ((dep != null) && (governorTok != null) && (dependentTok != null))
					dependencies.add(new SyntacticDependency(dep, governorTok, dependentTok));
			}

			Matcher collapsedDepMatcher = depPattern.matcher(sentenceMatcher.group(10));
			// Get all the collapsed dependencies
			while (collapsedDepMatcher.find()) {
				String depType = collapsedDepMatcher.group(1);
				Integer governorIdx = Integer.parseInt(collapsedDepMatcher.group(2));
				Integer dependentIdx = Integer.parseInt(collapsedDepMatcher.group(3));
				TokenWithNER governorTok = null, dependentTok = null;
				for (TokenWithNER tok : tokens) {
					if (tok.getTokenId() == governorIdx)
						governorTok = tok;
					if (tok.getTokenId() == dependentIdx)
						dependentTok = tok;
				}
				DependencyName dep;
				try {
					dep = DependencyName.valueOf(depType);
				} catch (Exception e) {
					dep = null;
				}
				if ((dep != null) && (governorTok != null) && (dependentTok != null))
					dependencies.add(new SyntacticDependency(dep, governorTok, dependentTok));
			}

			Matcher ccprocessedDepMatcher = depPattern.matcher(sentenceMatcher.group(12));
			// Get all the ccprocessed dependencies
			while (ccprocessedDepMatcher.find()) {
				String depType = ccprocessedDepMatcher.group(1);
				Integer governorIdx = Integer.parseInt(ccprocessedDepMatcher.group(2));
				Integer dependentIdx = Integer.parseInt(ccprocessedDepMatcher.group(3));
				TokenWithNER governorTok = null, dependentTok = null;
				for (TokenWithNER tok : tokens) {
					if (tok.getTokenId() == governorIdx)
						governorTok = tok;
					if (tok.getTokenId() == dependentIdx)
						dependentTok = tok;
				}
				DependencyName dep;
				try {
					dep = DependencyName.valueOf(depType);
				} catch (Exception e) {
					dep = null;
				}
				if ((dep != null) && (governorTok != null) && (dependentTok != null))
					dependencies.add(new SyntacticDependency(dep, governorTok, dependentTok));
			}
			// Add the last sentence if exists
			if (sentenceID.length() > 0)
				sentencesList.addAll(splitSentence(xmlFormattedSentence.toString(), tokens, dependencies));
		}

		// Return the sentences list
		return sentencesList;
	}

	/**
	 * If the given sentence contains several sentences, split them into
	 * separate sentences
	 * 
	 * @param xmlFormattedSentence
	 *            The XML format of the sentence.
	 * @param tokens
	 *            The sentence tokens.
	 * @param dependencies
	 *            The sentence dependencies.
	 * @return The split sentence.
	 */
	private static List<SentenceWithDependenciesAndNER> splitSentence(String xmlFormattedSentence, List<TokenWithNER> tokens, List<SyntacticDependency> dependencies) {
		ArrayList<SentenceWithDependenciesAndNER> sentences = new ArrayList<SentenceWithDependenciesAndNER>();
		int i = 0;

		// looking for the first "sent" token
		while ((i < tokens.size()) && (!tokens.get(i).getWord().matches("sent[0-9]+")))
			i++;

		// no sentence at all
		if (i == tokens.size())
			return new ArrayList<SentenceWithDependenciesAndNER>();

		while (true) {
			int j = i + 1;
			// looking for next sent token
			while ((j < tokens.size()) && (!tokens.get(j).getWord().matches("sent[0-9]+")))
				j++;

			// getting all the tokens so far as new sentence
			List<TokenWithNER> sentenceTokens = tokens.subList(i, j);

			// updating each token's sentence id to be the id of the closest
			// "sent" token
			for (TokenWithNER token : sentenceTokens)
				token.setSentenceId(tokens.get(i).getWord());

			// getting the dependencies which are relevant to this sentence
			ArrayList<SyntacticDependency> sentenceDependencies = new ArrayList<SyntacticDependency>();
			for (SyntacticDependency dependency : dependencies) {
				if ((dependency.getGovernorToken().getTokenId() > i) && (dependency.getGovernorToken().getTokenId() <= j) && (dependency.getDependentToken().getTokenId() > i)
						&& (dependency.getDependentToken().getTokenId() <= j)) {
					sentenceDependencies.add(dependency);
				}
			}

			// adding the sub sentence we found as new sentence
			sentences.add(new SentenceWithDependenciesAndNER(xmlFormattedSentence, sentenceTokens, sentenceDependencies));
			if (j == tokens.size())
				break;
			i = j;
		}
		return sentences;
	}

	/**
	 * Extract all the name entities which appear in all the sentences.
	 * 
	 * @param sentences
	 *            The sentences.
	 * @return The name entities.
	 */
	private static Set<String> extractEntities(List<SentenceWithDependenciesAndNER> sentences) {
		Set<String> entities = new HashSet<String>();
		for (SentenceWithDependenciesAndNER sentence : sentences) {
			Set<String> sentenceEntities = getEntitiesFromSentence(sentence);
			for (String entity : sentenceEntities)
				entities.add(entity);
		}
		return entities;
	}

	/**
	 * Extract all the name entities from given sentence.
	 * 
	 * @param sentence
	 *            The sentence.
	 * @return The name entities.
	 */
	private static Set<String> getEntitiesFromSentence(SentenceWithDependenciesAndNER sentence) {
		@SuppressWarnings("unchecked")
		List<TokenWithNER> tokens = (List<TokenWithNER>) sentence.getTokens();
		Set<String> entities = new HashSet<String>();
		int i = 2;
		while (i < tokens.size()) {
			NamedEntityType ner = tokens.get(i).getNamedEntityType();
			i++;
			// this token is not a name entity
			if (ner == NamedEntityType.NOT_DEFINED)
				continue;
			StringBuilder entity = new StringBuilder(tokens.get(i - 1).getWord());
			// getting the others parts of this entity (following parts with
			// same NER)
			while ((i < tokens.size()) && (tokens.get(i).getNamedEntityType() == ner)) {
				entity.append(" " + tokens.get(i).getWord());
				i++;
			}
			entities.add(entity.toString());
		}
		return entities;
	}

	/**
	 * Extract the lemmas of all significant words which appear in sentences.
	 * 
	 * @param sentences
	 *            The sentences.
	 * @return The words.
	 */
	private static Set<String> extractWords(List<SentenceWithDependenciesAndNER> sentences) {
		Map<String, Integer> wordsCounter = new HashMap<String, Integer>();

		// for each sentence
		for (SentenceWithDependenciesAndNER sentence : sentences) {
			@SuppressWarnings("unchecked")
			List<TokenWithNER> tokens = (List<TokenWithNER>) sentence.getTokens();
			// for each token
			for (int i = 2; i < tokens.size(); i++) {
				Integer counter = wordsCounter.get(tokens.get(i).getLemma());
				// new word in map
				if (counter == null)
					wordsCounter.put(tokens.get(i).getLemma(), 1);
				// word already appeared in map, update its frequency
				else
					wordsCounter.put(tokens.get(i).getLemma(), counter + 1);
			}
		}

		// filters words which appear less then 2 times
		Set<String> words = new HashSet<String>();
		for (Entry<String, Integer> entry : wordsCounter.entrySet()) {
			if (entry.getValue() > 1)
				words.add(entry.getKey());
		}
		return words;
	}

	/**
	 * Create the output directory.
	 * 
	 * @param dirName
	 *            The directory name.
	 */
	private static void createOutputDirectory(String dirName) {
		File dir = new File(dirName);
		if (!dir.exists()) {
			dir.mkdir();
			return;
		}
		for (File file : dir.listFiles())
			file.delete();
	}

	/**
	 * Creates arff file of examples from sentence and given annotation
	 * 
	 * @param sentences
	 *            The sentences.
	 * @param annotationFilename
	 *            The annotation filename.
	 * @param outputFilename
	 *            The arff filename.
	 */
	private static void createArffFile(List<SentenceWithDependenciesAndNER> sentences, String annotationFilename, String outputFilename) {
		// loading given annotation
		List<Relation> annotation = loadAnnotation(annotationFilename);
		PrintWriter writer;
		// PrintWriter writer2;

		// writer to wanted filename
		try {
			writer = new PrintWriter(outputFilename);
			// writer2 = new PrintWriter("sentences.txt");
		} catch (FileNotFoundException e) {
			e.printStackTrace();
			return;
		}

		// printing details of arff file
		writer.println("@relation nlpRelations");
		writer.println();

		// feature that represents the entity of first argument (1 in the entity
		// that is equal to first argument and 0 in all others
		for (int i = 0; i < allEntities.size(); i++)
			writer.println("@attribute firstEntityFeat" + i + " numeric");

		// feature that represents NER type of first entity (one of
		// {PERSON,ORGANIZATION,LOCATION})
		writer.println("@attribute firstEntityNer {PERSON,ORGANIZATION,LOCATION}");

		// feature that represents the entity of second argument (1 in the
		// entity that is equal to first argument and 0 in all others
		for (int i = 0; i < allEntities.size(); i++)
			writer.println("@attribute secondEntityFeat" + i + " numeric");

		// feature that represents NER type of second entity (one of
		// {PERSON,ORGANIZATION,LOCATION})
		writer.println("@attribute secondEntityNer {PERSON,ORGANIZATION,LOCATION}");

		// features that represents which words appear before the first
		// argument(1 in every word that appears before and 0 in others)
		for (int i = 0; i < allWords.size(); i++)
			writer.println("@attribute wordsBeforeFeat" + i + " numeric");
		// features that represents which words appear after the second
		// argument(1 in every word that appears after and 0 in others)
		for (int i = 0; i < allWords.size(); i++)
			writer.println("@attribute wordsAfterFeat" + i + " numeric");
		// features that represents which words appear between the arguments(1
		// in every word that appears between and 0 in others)
		for (int i = 0; i < allWords.size(); i++)
			writer.println("@attribute wordsBetweenFeat" + i + " numeric");

		// feature which gets 1 if the first entity is the subject of the verb
		// and 0 otherwise
		writer.println("@attribute isFirstEntitySubjOfVerb numeric");

		// feature which gets 1 if the second entity is the subject of the verb
		// and 0 otherwise
		writer.println("@attribute isSecondEntitySubjOfVerb numeric");

		// feature which gets 1 if the first entity is the object of the verb
		// and 0 otherwise
		writer.println("@attribute isFirstEntityObjOfVerb numeric");

		// feature which gets 1 if the second entity is the object of the verb
		// and 0 otherwise
		writer.println("@attribute isSecondEntityObjOfVerb numeric");

		/*
		 * feature which gets 1 if there is an appos connection between the
		 * entities(there is an appos dependency between the first entity and
		 * some other token, which is dependent with the second entity)
		 */
		writer.println("@attribute isApposConnectionBetweenEntities numeric");

		// distance between first entity to first verb
		writer.println("@attribute distanceFirstEntityFromVerb numeric");

		// distance between first entity to second verb
		writer.println("@attribute distanceSecondEntityFromVerb numeric");

		// feature for the POS of two words which appear before the first verb
		for (int i = 0; i < 2; i++) {
			writer.print("@attribute wordsBeforeVerbPos" + (i + 1) + " {");
			writer.print("no,");
			writer.print(POS.values()[0].name());
			for (int j = 1; j < POS.values().length; j++) {
				writer.print("," + POS.values()[j].name());
			}
			writer.println("}");
		}

		// feature for the POS of two words which appear after the first verb
		for (int i = 0; i < 2; i++) {
			writer.print("@attribute wordsAfterVerbPos" + (i + 1) + " {");
			writer.print("no,");
			writer.print(POS.values()[0].name());
			for (int j = 1; j < POS.values().length; j++) {
				writer.print("," + POS.values()[j].name());
			}
			writer.println("}");
		}

		writer.println("@attribute isFirstArgBefore numeric");
		writer.println("@attribute isRelatedToKilling numeric");
		writer.println("@attribute areEntitiesSeperatedByComma numeric");
		writer.println("@attribute isInBetweenEntities numeric");
		// writing the last attribute- class
		writer.println("@attribute class {OrgBased_In,Located_In,Work_For,Live_In,Kill,No_Relation}");
		writer.println();

		writer.println("@data");
		// printing all the examples
		for (SentenceWithDependenciesAndNER sentence : sentences) {
			String sentenceID = sentence.getTokens().get(0).getSentenceId();
			Set<String> entities = getEntitiesFromSentence(sentence);
			// for each two different name entities in the sentence, create an
			// example
			for (String firstEntity : entities) {
				for (String secondEntity : entities) {
					if (firstEntity.intern() != secondEntity.intern()) {
						String relationName = "No_Relation";
						for (Relation relation : annotation) {
							if ((relation.getArg1().intern() == firstEntity.intern()) && (relation.getArg2().intern() == secondEntity.intern())
									&& (relation.getSentenceId().intern() == sentenceID.intern())) {
								relationName = relation.getRelName().name();
								break;
							}
						}
						writer.println(buildExample(sentence, firstEntity, secondEntity, relationName));
						// writer2.println(sentence.getTokens().get(0).getSentenceId());
					}
				}
			}
		}
		writer.close();
		// writer2.close();
	}

	/**
	 * Load the relations from the annotation file.
	 * 
	 * @param filename
	 *            The annotation filename.
	 * @return The relations.
	 */
	private static List<Relation> loadAnnotation(String filename) {
		// Create the list of the relations
		List<Relation> list = new ArrayList<Relation>();
		try {
			// Get a reader from the file
			BufferedReader reader = new BufferedReader(new FileReader(filename));
			String line;
			// Read all the lines
			while ((line = reader.readLine()) != null) {
				// Split the line by tab
				String[] elements = line.split("\t");
				// Create the relation based on the line read
				Relation rel = new Relation(Relation.RelationName.valueOf(elements[2]), elements[1], elements[3], elements[0]);
				list.add(rel);
			}
			// Close the reader
			reader.close();
		} catch (Exception e) {
			e.printStackTrace();
		}
		// Return the list of relations
		return list;
	}

	/**
	 * Creates a string which represents the example for the two entities in
	 * this sentence: in the format {index value, index2 value2...} for values
	 * which aren't 0 (sparse representation).
	 * 
	 * @param sentence
	 *            The sentence.
	 * @param firstEntity
	 *            The first entity.
	 * @param secondEntity
	 *            The second entity.
	 * @param relationClass
	 *            The class.
	 * @return The string.
	 */
	private static String buildExample(SentenceWithDependenciesAndNER sentence, String firstEntity, String secondEntity, String relationClass) {
		StringBuilder exampleLine = new StringBuilder();

		@SuppressWarnings("unchecked")
		List<TokenWithNER> tokens = (List<TokenWithNER>) sentence.getTokens();
		String[] splitted1 = firstEntity.split(" ");
		String[] splitted2 = secondEntity.split(" ");

		int firstIndex = 0, secondIndex = 0;

		// finding index of first entity and second entity
		for (int i = 0; i < tokens.size(); i++) {
			if (tokens.get(i).getWord().intern() == splitted1[0].intern() && tokens.get(i).getNamedEntityType() != NamedEntityType.NOT_DEFINED) {
				boolean same = true;
				int j = 1;
				// checking if rest of entity is identical
				while (j < splitted1.length) {
					if (tokens.get(i + j).getWord().intern() != splitted1[j].intern()) {
						same = false;
						break;
					}
					j++;
				}
				if (same)
					firstIndex = i;
			}
			// found the first word of second entity
			if (tokens.get(i).getWord().intern() == splitted2[0].intern() && tokens.get(i).getNamedEntityType() != NamedEntityType.NOT_DEFINED) {
				boolean same = true;
				int j = 1;
				// checking if rest of entity is identical
				while (j < splitted2.length) {
					if (tokens.get(i + j).getWord().intern() != splitted2[j].intern()) {
						same = false;
						break;
					}
					j++;
				}
				if (same)
					secondIndex = i;
			}
		}

		// writing value of features which aren't 0
		exampleLine.append("{");
		exampleLine.append(allEntities.indexOf(firstEntity) + " 1,");
		// first entity ner
		exampleLine.append(allEntities.size() + " " + tokens.get(firstIndex).getNamedEntityType().name() + ",");
		exampleLine.append((allEntities.indexOf(secondEntity) + allEntities.size() + 1) + " 1,");
		// second entity ner
		exampleLine.append((2 * allEntities.size() + 1) + " " + tokens.get(secondIndex).getNamedEntityType().name() + ",");

		// words before first entity
		for (Integer index : getWordsBefore(sentence, Math.min(firstIndex, secondIndex)))
			exampleLine.append(index + " 1,");
		// words after second entity
		for (Integer index : getWordsAfter(sentence, Math.max(firstIndex, secondIndex)))
			exampleLine.append(index + " 1,");
		// words between entities
		for (Integer index : getWordsBetween(sentence, Math.min(firstIndex, secondIndex), Math.max(firstIndex, secondIndex)))
			exampleLine.append(index + " 1,");

		// is first entity subject of verb
		if (isEntitySubjOfVerb(sentence, Math.min(firstIndex, secondIndex)))
			exampleLine.append((2 * allEntities.size() + 3 * allWords.size() + 2) + " 1,");
		// is second entity subject of verb
		if (isEntitySubjOfVerb(sentence, Math.max(firstIndex, secondIndex)))
			exampleLine.append((2 * allEntities.size() + 3 * allWords.size() + 3) + " 1,");
		// if first entity object of verb
		if (isEntityObjOfVerb(sentence, Math.min(firstIndex, secondIndex)))
			exampleLine.append((2 * allEntities.size() + 3 * allWords.size() + 4) + " 1,");
		// is second entity object of word
		if (isEntityObjOfVerb(sentence, Math.max(firstIndex, secondIndex)))
			exampleLine.append((2 * allEntities.size() + 3 * allWords.size() + 5) + " 1,");
		// is appos connection between the entities
		if (isApposConnectionBetweenEntities(sentence, Math.min(firstIndex, secondIndex), Math.max(firstIndex, secondIndex)))
			exampleLine.append((2 * allEntities.size() + 3 * allWords.size() + 6) + " 1,");

		int dist;
		// distance first entity from verb
		if ((dist = distanceEntityVerb(sentence, Math.min(firstIndex, secondIndex))) > 0)
			exampleLine.append((2 * allEntities.size() + 3 * allWords.size() + 7) + " " + dist + ",");
		// distance second entity from verb
		if ((dist = distanceEntityVerb(sentence, Math.max(firstIndex, secondIndex))) > 0)
			exampleLine.append((2 * allEntities.size() + 3 * allWords.size() + 8) + " " + dist + ",");

		List<String> beforeAndAfterVerbPoses = getBeforeAndAfterVerbPoses(sentence);
		// POS of words before and after verb in +-2 window
		for (int i = 0; i < beforeAndAfterVerbPoses.size(); i++)
			exampleLine.append((2 * allEntities.size() + 3 * allWords.size() + 9 + i) + " " + beforeAndAfterVerbPoses.get(i) + ",");

		if (firstIndex < secondIndex)
			exampleLine.append((2 * allEntities.size() + 3 * allWords.size() + 9 + beforeAndAfterVerbPoses.size()) + " " + 1 + ",");
		if (isRelatedToKilling(sentence))
			exampleLine.append((2 * allEntities.size() + 3 * allWords.size() + 10 + beforeAndAfterVerbPoses.size()) + " " + 1 + ",");
		if (areEntitiesSeperatedByComma(sentence, firstIndex, secondIndex))
			exampleLine.append((2 * allEntities.size() + 3 * allWords.size() + 11 + beforeAndAfterVerbPoses.size()) + " " + 1 + ",");
		if (isInBetweenEntities(sentence, firstIndex, secondIndex))
			exampleLine.append((2 * allEntities.size() + 3 * allWords.size() + 12 + beforeAndAfterVerbPoses.size()) + " " + 1 + ",");

		// class
		exampleLine.append((2 * allEntities.size() + 3 * allWords.size() + 13 + beforeAndAfterVerbPoses.size()) + " " + relationClass);
		exampleLine.append("}");
		return exampleLine.toString();
	}

	/**
	 * Check if there is a single word, "in" between the entities.
	 * 
	 * @param sent
	 *            The sentence.
	 * @param firstIndex
	 *            The index of the first entity.
	 * @param secondIndex
	 *            The index of the second entity.
	 * @return true if there is a single word, "in" between the entities.
	 */
	private static boolean isInBetweenEntities(SentenceWithDependenciesAndNER sent, int firstIndex, int secondIndex) {
		if (secondIndex < firstIndex)
			return false;
		@SuppressWarnings("unchecked")
		List<TokenWithNER> tokens = (List<TokenWithNER>) sent.getTokens();
		int i = firstIndex;
		NamedEntityType ner = tokens.get(firstIndex).getNamedEntityType();
		i++;
		// finding the end of the first entity
		while ((i < tokens.size()) && (tokens.get(i).getNamedEntityType() == ner))
			i++;
		if (i >= tokens.size() - 1)
			return false;
		// checking if there is "in" between entities
		return ((i + 1 == secondIndex) && (tokens.get(i).getWord().intern() == "in"));
	}

	/**
	 * Check if there is a single word, "," between the entities.
	 * 
	 * @param sent
	 *            The sentence.
	 * @param firstIndex
	 *            The index of the first entity.
	 * @param secondIndex
	 *            The index of the second entity.
	 * @return true if there is a single word, "," between the entities.
	 */
	private static boolean areEntitiesSeperatedByComma(SentenceWithDependenciesAndNER sent, int firstIndex, int secondIndex) {
		if (secondIndex < firstIndex)
			return false;
		@SuppressWarnings("unchecked")
		List<TokenWithNER> tokens = (List<TokenWithNER>) sent.getTokens();
		int i = firstIndex;
		NamedEntityType ner = tokens.get(firstIndex).getNamedEntityType();
		i++;
		// looking for the end of first entity
		while ((i < tokens.size()) && (tokens.get(i).getNamedEntityType() == ner))
			i++;
		if (i >= tokens.size() - 1)
			return false;
		// checking if a comma appears between entities
		return ((i + 1 == secondIndex) && (tokens.get(i).getWord().intern() == ","));
	}

	/**
	 * Check if the sentence is related to killing.
	 * 
	 * @param sentence
	 *            The sentence.
	 * @return true if the sentence is related to killing.
	 */
	private static boolean isRelatedToKilling(Sentence sentence) {
		List<String> killSynonyms = new ArrayList<String>();
		killSynonyms.add("kill");
		killSynonyms.add("murder");
		killSynonyms.add("assassinate");
		killSynonyms.add("bomb");
		killSynonyms.add("shoot");
		killSynonyms.add("dispatch");
		killSynonyms.add("crucify");
		killSynonyms.add("fire");
		killSynonyms.add("drown");
		killSynonyms.add("suffocate");
		killSynonyms.add("strangle");
		killSynonyms.add("poison");
		killSynonyms.add("slay");
		killSynonyms.add("slaughter");
		killSynonyms.add("exterminate");
		killSynonyms.add("assassination");

		// looking for a word with relation to killing
		List<? extends Token> tokens = sentence.getTokens();
		for (Token token : tokens) {
			if (killSynonyms.contains(token.getLemma()))
				return true;
		}
		return false;
	}

	/**
	 * Get the indexes of features that represent words that appear before the
	 * first entity.
	 * 
	 * @param sentence
	 *            The sentence.
	 * @param firstEntityIndex
	 *            The first entity index.
	 * @return The sorted, duplicate-free feature indexes.
	 */
	private static List<Integer> getWordsBefore(SentenceWithDependenciesAndNER sentence, int firstEntityIndex) {
		int x = allEntities.size();
		@SuppressWarnings("unchecked")
		List<TokenWithNER> tokens = (List<TokenWithNER>) sentence.getTokens();

		// HashSet deduplicates in O(1) instead of List.contains's O(n) scan.
		Set<Integer> seen = new HashSet<Integer>();
		// for each word before the first entity
		// (starts at 2 — presumably skips leading sentence-marker tokens; TODO confirm)
		for (int i = 2; i < firstEntityIndex; i++) {
			int index = allWords.indexOf(tokens.get(i).getLemma());
			// isn't a relevant word
			if (index < 0)
				continue;
			// feature offset: words-before block starts after the two entity blocks
			seen.add(Integer.valueOf(2 * x + 2 + index));
		}
		List<Integer> appearingWords = new ArrayList<Integer>(seen);
		Collections.sort(appearingWords);
		return appearingWords;
	}

	/**
	 * Get the indexes of features that represent words that appear after the
	 * second entity.
	 * 
	 * @param sentence
	 *            The sentence.
	 * @param secondEntityIndex
	 *            The second entity index.
	 * @return The sorted, duplicate-free feature indexes.
	 */
	private static List<Integer> getWordsAfter(SentenceWithDependenciesAndNER sentence, int secondEntityIndex) {
		int x = allEntities.size();
		int y = allWords.size();
		@SuppressWarnings("unchecked")
		List<TokenWithNER> tokens = (List<TokenWithNER>) sentence.getTokens();

		// Skip past the second entity: consecutive tokens sharing its NER tag.
		NamedEntityType ner = tokens.get(secondEntityIndex).getNamedEntityType();
		int i = secondEntityIndex + 1;
		while (i < tokens.size() && tokens.get(i).getNamedEntityType() == ner)
			i++;

		// HashSet deduplicates in O(1) instead of List.contains's O(n) scan.
		Set<Integer> seen = new HashSet<Integer>();
		// for each word after the second entity
		for (; i < tokens.size(); i++) {
			int index = allWords.indexOf(tokens.get(i).getLemma());
			// isn't a relevant word
			if (index < 0)
				continue;
			// feature offset: words-after block follows entities and words-before
			seen.add(Integer.valueOf(2 * x + y + 2 + index));
		}
		List<Integer> appearingWords = new ArrayList<Integer>(seen);
		Collections.sort(appearingWords);
		return appearingWords;
	}

	/**
	 * Get the indexes of features that represent words that appear between the
	 * entities.
	 * 
	 * @param sentence
	 *            The sentence.
	 * @param prevEntityIndex
	 *            The first entity index.
	 * @param nextEntityIndex
	 *            The second entity index.
	 * @return The sorted, duplicate-free feature indexes.
	 */
	private static List<Integer> getWordsBetween(SentenceWithDependenciesAndNER sentence, int prevEntityIndex, int nextEntityIndex) {
		int x = allEntities.size();
		int y = allWords.size();
		@SuppressWarnings("unchecked")
		List<TokenWithNER> tokens = (List<TokenWithNER>) sentence.getTokens();

		// Skip past the first entity: consecutive tokens sharing its NER tag.
		int i = prevEntityIndex;
		NamedEntityType ner = tokens.get(i).getNamedEntityType();
		while (i < tokens.size() && tokens.get(i).getNamedEntityType() == ner)
			i++;

		// HashSet deduplicates in O(1) instead of List.contains's O(n) scan.
		Set<Integer> seen = new HashSet<Integer>();
		// for each word between the entities
		for (; i < nextEntityIndex; i++) {
			int index = allWords.indexOf(tokens.get(i).getLemma());
			// isn't relevant word
			if (index < 0)
				continue;
			// feature offset: words-between block follows both word blocks
			seen.add(Integer.valueOf(2 * x + 2 * y + 2 + index));
		}
		List<Integer> appearingWords = new ArrayList<Integer>(seen);
		Collections.sort(appearingWords);
		return appearingWords;
	}

	/**
	 * Check if the entity starting at the given index is the subject of a verb.
	 * 
	 * @param sentence
	 *            The sentence.
	 * @param firstEntityIndex
	 *            The token index where the entity starts.
	 * @return true if an nsubj dependency connects a verb governor to any
	 *         token of the entity.
	 */
	private static boolean isEntitySubjOfVerb(SentenceWithDependenciesAndNER sentence, int firstEntityIndex) {

		List<SyntacticDependency> dependencies = sentence.getDependencies();
		@SuppressWarnings("unchecked")
		List<TokenWithNER> tokens = (List<TokenWithNER>) sentence.getTokens();
		List<Integer> tokendIdsEntity = new ArrayList<Integer>();
		// hoisted loop invariant: the entity's NER tag
		NamedEntityType ner = tokens.get(firstEntityIndex).getNamedEntityType();
		int i = firstEntityIndex;
		// finding indexes of all words which the entity contains
		while (i < tokens.size() && tokens.get(i).getNamedEntityType() == ner) {
			tokendIdsEntity.add(tokens.get(i).getTokenId());
			i++;
		}
		// looking for an nsubj dependency whose governor is a verb (POS "VB*")
		// and whose dependent is one of the entity's tokens
		for (SyntacticDependency dep : dependencies) {
			if (dep.getDependencyName() == DependencyName.nsubj && dep.getGovernorToken().getPos().name().startsWith("VB") && tokendIdsEntity.contains(dep.getDependentToken().getTokenId())) {
				return true;
			}
		}
		return false;
	}

	/**
	 * Check if the entity starting at the given index is the direct object of
	 * a verb.
	 * 
	 * @param sentence
	 *            The sentence.
	 * @param firstEntityIndex
	 *            The token index where the entity starts.
	 * @return true if a dobj dependency connects a verb governor to any token
	 *         of the entity.
	 */
	private static boolean isEntityObjOfVerb(SentenceWithDependenciesAndNER sentence, int firstEntityIndex) {

		List<SyntacticDependency> dependencies = sentence.getDependencies();
		@SuppressWarnings("unchecked")
		List<TokenWithNER> tokens = (List<TokenWithNER>) sentence.getTokens();
		List<Integer> tokendIdsEntity = new ArrayList<Integer>();
		// hoisted loop invariant: the entity's NER tag
		NamedEntityType ner = tokens.get(firstEntityIndex).getNamedEntityType();
		int i = firstEntityIndex;
		// finding indexes of all words which the entity contains
		while (i < tokens.size() && tokens.get(i).getNamedEntityType() == ner) {
			tokendIdsEntity.add(tokens.get(i).getTokenId());
			i++;
		}
		// looking for a dobj dependency whose governor is a verb (POS "VB*")
		// and whose dependent is one of the entity's tokens
		for (SyntacticDependency dep : dependencies) {
			if (dep.getDependencyName() == DependencyName.dobj && dep.getGovernorToken().getPos().name().startsWith("VB") && tokendIdsEntity.contains(dep.getDependentToken().getTokenId())) {
				return true;
			}
		}
		return false;
	}

	/**
	 * @param sentence
	 *            The sentence.
	 * @param firstEntityIndex
	 *            The token index where the first entity starts.
	 * @param secondEntityIndex
	 *            The token index where the second entity starts.
	 * @return true if there is an appos dependency between the entity and
	 *         another token which is dependent of second entity
	 */
	private static boolean isApposConnectionBetweenEntities(SentenceWithDependenciesAndNER sentence, int firstEntityIndex, int secondEntityIndex) {

		List<SyntacticDependency> dependencies = sentence.getDependencies();
		@SuppressWarnings("unchecked")
		List<TokenWithNER> tokens = (List<TokenWithNER>) sentence.getTokens();

		List<Integer> tokendIdsFirstEntity = new ArrayList<Integer>();
		List<Integer> tokendIdsSecondEntity = new ArrayList<Integer>();

		// getting all indexes of first entity words
		int i = firstEntityIndex;
		while (i < tokens.size() && tokens.get(i).getNamedEntityType() == tokens.get(firstEntityIndex).getNamedEntityType()) {
			tokendIdsFirstEntity.add(tokens.get(i).getTokenId());
			i++;
		}
		i = secondEntityIndex;
		// getting all indexes of second entity words
		while (i < tokens.size() && tokens.get(i).getNamedEntityType() == tokens.get(secondEntityIndex).getNamedEntityType()) {
			tokendIdsSecondEntity.add(tokens.get(i).getTokenId());
			i++;
		}

		for (SyntacticDependency dep : dependencies) {
			// looking for an appos dependency where the governor is part of the
			// first entity
			if (dep.getDependencyName() == DependencyName.appos && tokendIdsFirstEntity.contains(dep.getGovernorToken().getTokenId())) {
				// looking for another dependency where the governor is the
				// previous dependent and the dependent is part of the second
				// entity. BUGFIX: tokendIdsSecondEntity was built but never
				// consulted, so any dependency hanging off the appos token
				// returned true.
				for (SyntacticDependency dep2 : dependencies) {
					if (dep2.getGovernorToken() == dep.getDependentToken() && tokendIdsSecondEntity.contains(dep2.getDependentToken().getTokenId())) {
						return true;
					}
				}

			}
		}
		return false;
	}

	/**
	 * Compute the distance between the entity and the sentence's first verb.
	 * 
	 * @param sentence
	 *            The sentence.
	 * @param entityIndex
	 *            The token index of the entity.
	 * @return the absolute distance between entityIndex and the first token
	 *         whose POS tag starts with "VB", or -1 if the sentence has no verb
	 */
	private static int distanceEntityVerb(SentenceWithDependenciesAndNER sentence, int entityIndex) {
		@SuppressWarnings("unchecked")
		List<TokenWithNER> tokens = (List<TokenWithNER>) sentence.getTokens();
		// scan left-to-right; return as soon as a verb is found
		for (int pos = 0; pos < tokens.size(); pos++) {
			if (tokens.get(pos).getPos().name().startsWith("VB"))
				return Math.abs(entityIndex - pos);
		}
		// no verb in the sentence
		return -1;
	}

	/**
	 * Get the POS tags of the words in a +-2 window around the sentence's
	 * first verb.
	 * 
	 * @param sentence
	 *            The sentence.
	 * @return four POS names (verb-1, verb-2, verb+1, verb+2), with "no" used
	 *         for out-of-range positions or when the sentence has no verb
	 */
	private static List<String> getBeforeAndAfterVerbPoses(Sentence sentence) {
		List<? extends Token> tokens = sentence.getTokens();

		// locate the first verb (POS tag starting with "VB")
		int verb = 0;
		while (verb < tokens.size() && !tokens.get(verb).getPos().name().startsWith("VB"))
			verb++;

		List<String> poses = new ArrayList<String>();
		// no verb in the sentence: all four window slots are "no"
		if (verb == tokens.size()) {
			for (int k = 0; k < 4; k++)
				poses.add("no");
			return poses;
		}

		// BUGFIX: the original used "i - 1 > 0" / "i - 2 > 0", which wrongly
		// excluded index 0 (a verb at position 1 lost the POS of token 0).
		// posAt() performs a correct >= 0 bounds check.
		poses.add(posAt(tokens, verb - 1)); // first word before verb
		poses.add(posAt(tokens, verb - 2)); // second word before verb
		poses.add(posAt(tokens, verb + 1)); // first word after verb
		poses.add(posAt(tokens, verb + 2)); // second word after verb
		return poses;
	}

	/**
	 * POS name of the token at index i, or "no" when i is out of range.
	 */
	private static String posAt(List<? extends Token> tokens, int i) {
		if (i >= 0 && i < tokens.size())
			return tokens.get(i).getPos().name();
		return "no";
	}

	/**
	 * Read a predicted ARFF file and print recall, precision and F1 per
	 * relation (the last two columns are gold label and predicted label).
	 * 
	 * @param filename
	 *            The predicted ARFF filename.
	 */
	private static void checkClassifierResults(String filename) {
		BufferedReader reader = null;
		try {
			reader = new BufferedReader(new FileReader(filename));

			HashMap<String, Integer> annotatedCount = new HashMap<String, Integer>();
			annotatedCount.put("Kill", 0);
			annotatedCount.put("OrgBased_In", 0);
			annotatedCount.put("Live_In", 0);
			annotatedCount.put("Work_For", 0);
			annotatedCount.put("Located_In", 0);

			HashMap<String, Integer> ourCount = new HashMap<String, Integer>();
			ourCount.putAll(annotatedCount);

			HashMap<String, Integer> correctPredictionsCount = new HashMap<String, Integer>();
			correctPredictionsCount.putAll(annotatedCount);

			// skip the ARFF header up to (and including) the @data marker;
			// use equals() instead of the fragile intern()==literal idiom
			String line;
			while ((line = reader.readLine()) != null) {
				if (line.equals("@data"))
					break;
			}

			// tally gold labels, predictions and correct predictions
			while ((line = reader.readLine()) != null) {
				String[] splitted = line.split(",");
				int len = splitted.length;
				String gold = splitted[len - 2];
				String predicted = splitted[len - 1];
				if (!gold.equals("No_Relation"))
					annotatedCount.put(gold, annotatedCount.get(gold) + 1);
				if (!predicted.equals("No_Relation")) {
					ourCount.put(predicted, ourCount.get(predicted) + 1);
					if (predicted.equals(gold))
						correctPredictionsCount.put(predicted, correctPredictionsCount.get(predicted) + 1);
				}
			}

			for (Entry<String, Integer> entry : correctPredictionsCount.entrySet()) {
				int gold = annotatedCount.get(entry.getKey());
				int predicted = ourCount.get(entry.getKey());
				// guard against division by zero: report 0 instead of NaN
				double recall = (gold == 0) ? 0 : (double) entry.getValue() / gold;
				double precision = (predicted == 0) ? 0 : (double) entry.getValue() / predicted;
				double f1 = (recall + precision == 0) ? 0 : (2 * recall * precision) / (recall + precision);
				System.out.println(entry.getKey() + ":");
				System.out.println((recall * 100) + "%\t" + (precision * 100) + "%\t" + (f1 * 100) + "%");
			}
		} catch (IOException e) {
			e.printStackTrace();
		} finally {
			// close in finally so the reader is not leaked on exception
			if (reader != null) {
				try {
					reader.close();
				} catch (IOException ignored) {
					// nothing sensible to do while cleaning up
				}
			}
		}
	}

	/**
	 * Sample up to six misclassified instances from a predicted ARFF file and
	 * print, for each: its sentence id (from "sentences.txt"), the two
	 * entities involved, and the predicted/gold label pair.
	 * 
	 * @param filename
	 *            The predicted ARFF filename.
	 */
	private static void analyseError(String filename) {
		BufferedReader reader = null;
		try {
			reader = new BufferedReader(new FileReader(filename));

			List<String> wrong = new ArrayList<String>();
			List<String> wrongClassifications = new ArrayList<String>();
			// skip the ARFF header up to (and including) the @data marker;
			// use equals() instead of the fragile intern()==literal idiom
			String line;
			while ((line = reader.readLine()) != null) {
				if (line.equals("@data"))
					break;
			}
			// collect every line whose prediction differs from the gold label
			int count = 0;
			while ((line = reader.readLine()) != null) {
				String[] splitted = line.split(",");
				int len = splitted.length;
				if (!splitted[len - 1].equals(splitted[len - 2])) {
					wrong.add(count + ":" + line);
					wrongClassifications.add(splitted[len - 1] + "," + splitted[len - 2]);
				}
				count++;
			}

			// keep a copy so sampled entries can be mapped back to their
			// classification even after removal from "wrong"
			List<String> wrong2 = new ArrayList<String>(wrong);
			List<String> samples = new ArrayList<String>();
			Random r = new Random();
			// sample without replacement; guard against fewer than 6 errors
			// (Random.nextInt(0) would throw)
			for (int i = 0; i < 6 && !wrong.isEmpty(); i++) {
				int index = r.nextInt(wrong.size());
				samples.add(wrong.get(index));
				wrong.remove(index);
			}

			reader.close();

			reader = new BufferedReader(new FileReader("sentences.txt"));
			List<Integer> sentenceIDs = new ArrayList<Integer>();

			while ((line = reader.readLine()) != null) {
				// each line is a prefix of 4 characters followed by the id
				sentenceIDs.add(Integer.parseInt(line.substring(4)));
			}

			for (int i = 0; i < samples.size(); i++) {

				// recover the two entity features: the first two "1" values
				// are the one-hot entity indicators
				String ex = samples.get(i).split(":")[1];
				String[] splitted = ex.split(",");
				int firstEntityIndex = -1, secondEntityIndex = -1;
				for (int j = 0; j < splitted.length; j++) {
					if (splitted[j].equals("1") && firstEntityIndex == -1) {
						firstEntityIndex = j;
					} else if (splitted[j].equals("1")) {
						secondEntityIndex = j - allEntities.size() - 1;
						break;
					}
				}
				System.out.println("sample " + i + ":");
				System.out.println("_____________");
				System.out.println(sentenceIDs.get(Integer.parseInt(samples.get(i).split(":")[0])));
				System.out.println(allEntities.get(firstEntityIndex));
				System.out.println(allEntities.get(secondEntityIndex));
				System.out.println(wrongClassifications.get(wrong2.indexOf(samples.get(i))));

			}

		} catch (IOException e) {
			e.printStackTrace();
		} finally {
			// close in finally so neither reader is leaked
			if (reader != null) {
				try {
					reader.close();
				} catch (IOException ignored) {
					// nothing sensible to do while cleaning up
				}
			}
		}
	}
}
