import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.PrintStream;
import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Properties;

import edu.stanford.nlp.ling.*;
import edu.stanford.nlp.ling.CoreAnnotations.LemmaAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.PartOfSpeechAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.TextAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.TokensAnnotation;
import edu.stanford.nlp.pipeline.*;
import edu.stanford.nlp.semgraph.SemanticGraph;
import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation;
import edu.stanford.nlp.trees.*;
import edu.stanford.nlp.util.*;
import edu.sussex.nlp.jws.JWS;
import edu.sussex.nlp.jws.Lin;

public class Main {
	/*
	 * NOTES (translated from the original Serbian): 1. For now semantic and
	 * lexical similarity is computed everywhere with the WORD instead of the
	 * LEMMA -- check where that matters; maybe shallow parsing sim_word with
	 * lemmas? 2. Also check somewhere that alpha is between zero and one and
	 * that it is not an outlier. 3. Check what the default values for
	 * semantic and lexical similarity are and whether 0 is correct, since
	 * sine/cosine similarity can take values from -1 to 1. 4.
	 */

	// Configuration values, loaded from config/conf.properties by read_config().
	public static String WORD_NET_DIRECTORY;
	public static String WORD_NET_VERSION;
	public static String NAME_OF_OUTPUT_FILE;
	public static String NAME_OF_INPUT_FILE;
	// Number of decimals kept by cutDecimals().
	public static Integer DOUBLE_NUM_DECIMALS;
	
	// WordNet Lin similarity measure, initialized by load_Word_Net().
	public static Lin WN_lin;
	// Decimal format used to round doubles for output, built in read_config().
	public static DecimalFormat out_DF;

	// Sentinel token standing in for the dependency-graph ROOT (index 0).
	public static PosWrapper depend_ROOT = new PosWrapper("ROOT", "ROOT",
			"ROOT");

	// Penn Treebank POS tags used as feature keys. "CD", punctuation and
	// unrecognized tags are deliberately excluded -- see unrollSentence().
	private static String[] pos = { "CC", "PRP$", "RB", "DT", "RBR", "EX",
			"RBS", "FW", "RP",
			"IN",
			"SYM",// "CD" intentionally left out
			"JJ", "TO", "JJR", "UH", "JJS", "VB", "LS", "VBD", "MD", "VBG",
			"NN", "VBN", "NNS", "VBP", "NNP", "VBZ", "NNPS", "WDT", "PDT",
			"WP$", "POS", "WRB", "PRP" };
	// Union of verb and noun tags (semantic similarity is only computed for these).
	private static String[] pos_verb_nouns = { "VB", "VBD", "MD", "VBG", "VBN",
			"VBP", "VBZ", "NN", "NNS", "NNP", "NNPS" };
	// Verb tags (mapped to WordNet part-of-speech "v" in semSimilarity()).
	private static String[] pos_verbs = { "VB", "VBD", "MD", "VBG", "VBN",
			"VBP", "VBZ" };
	// Noun tags (mapped to WordNet part-of-speech "n" in semSimilarity()).
	private static String[] pos_nouns = { "NN", "NNS", "NNP", "NNPS" };
	// Stanford typed-dependency relation names used as feature keys.
	private static String[] depend = { "root", "dep", "aux", "auxpass", "cop",
			"arg", "agent", "comp", "acomp", "ccomp", "xcomp", "obj", "dobj",
			"iobj", "pobj", "subj", "nsubj", "nsubjpass", "csubj", "csubjpass",
			"cc", "conj", "expl", "mod", "amod", "appos", "advcl", "det",
			"predet", "preconj", "vmod", "mwe", "mark", "advmod", "neg",
			"rcmod", "quantmod", "nn", "npadvmod", "tmod", "num", "number",
			"prep", "poss", "possessive", "prt", "parataxis", "punct", "ref",
			"sdep", "xsubj" };

	// Shared CoreNLP pipeline. Constructing StanfordCoreNLP loads all the
	// annotator models, which is very expensive, so the pipeline is created
	// once and reused across calls (the original code rebuilt it for every
	// sentence).
	private static StanfordCoreNLP nlp_pipeline = null;

	/**
	 * Annotates one sentence with the Stanford CoreNLP pipeline (tokenize,
	 * ssplit, pos, lemma, ner, parse, dcoref) and fills the two result
	 * structures: {@code pos_struct} maps each POS tag of {@code pos} to the
	 * tokens carrying that tag, and {@code depend_struct} maps each typed
	 * dependency relation of {@code depend} to the (governor, dependent)
	 * pairs found in the sentence. Tokens whose tag is not listed in
	 * {@code pos} (CD, punctuation, unknown words) are deliberately skipped.
	 * Diagnostic messages are printed when a governor/dependent word does not
	 * match the token list, when a relation has no entry in
	 * {@code depend_struct}, or when CoreNLP splits the input into more than
	 * one sentence.
	 *
	 * @param sentence      one sentence of plain text
	 * @param pos_struct    pre-initialized map POS tag -> token list (appended to)
	 * @param depend_struct pre-initialized map relation -> dependency list (appended to)
	 */
	public static void unrollSentence(String sentence,
			HashMap<String, ArrayList<PosWrapper>> pos_struct,
			HashMap<String, ArrayList<DependencyWrapper>> depend_struct) {

		// lazily create the StanfordCoreNLP object, with POS tagging,
		// lemmatization, NER, parsing, and coreference resolution
		if (nlp_pipeline == null) {
			Properties props = new Properties();
			props.put("annotators",
					"tokenize, ssplit, pos, lemma, ner, parse, dcoref");
			nlp_pipeline = new StanfordCoreNLP(props);
		}

		// create an empty Annotation, just with the given text
		Annotation document = new Annotation(sentence);

		// run all Annotators on this text
		nlp_pipeline.annotate(document);

		// these are all the sentences in this document; a CoreMap is
		// essentially a Map that uses class objects as keys and has values
		// with custom types
		List<CoreMap> sentences = document.get(SentencesAnnotation.class);

		// all tokens of the sentence in order; needed so that a dependency's
		// governor/dependent (addressed by token index) can recover its lemma
		// and POS tag
		ArrayList<PosWrapper> tempSentence = new ArrayList<PosWrapper>();

		// there should be exactly one iteration here because the caller
		// already split the input into individual sentences
		int num_sent = 0;
		for (CoreMap sent : sentences) {
			num_sent++;

			// traversing the words in the current sentence; a CoreLabel is a
			// CoreMap with additional token-specific methods
			for (CoreLabel token : sent.get(TokensAnnotation.class)) {
				// this is the text of the token
				String word = token.get(TextAnnotation.class);
				System.out.println("word: " + word);

				// this is the lema of the token
				String lema = token.get(LemmaAnnotation.class);
				System.out.println("lema: " + lema);

				// this is the POS tag of the token
				String partOfSpeech = token.get(PartOfSpeechAnnotation.class);
				System.out.println("pos: " + partOfSpeech);

				PosWrapper wrapper = new PosWrapper(word, lema, partOfSpeech);

				tempSentence.add(wrapper);

				// VERY IMPORTANT: only tags listed in 'pos' are collected, so
				// CD, all punctuation and all unrecognized words are ignored
				for (String p : pos) {
					if (p.equals(partOfSpeech)) {
						pos_struct.get(partOfSpeech).add(wrapper);
					}
				}

				// the NER label of the token could become an extra attribute:
				// String ne = token.get(NamedEntityTagAnnotation.class);
			}

			// this is the Stanford dependency graph of the current sentence
			SemanticGraph dependencies = sent
					.get(CollapsedCCProcessedDependenciesAnnotation.class);
			Collection<TypedDependency> collection_depend = dependencies
					.typedDependencies();
			for (TypedDependency td : collection_depend) {
				// save governor and dependent for the discovered dependency
				DependencyWrapper dw = new DependencyWrapper();

				// index 0 is the artificial ROOT node of the dependency graph,
				// so token positions are shifted by one
				PosWrapper gov;
				if (td.gov().index() == 0) {
					gov = depend_ROOT;
				} else {
					gov = tempSentence.get(td.gov().index() - 1);
				}
				if (!td.gov().value().equals(gov.getWord())) {
					System.out
							.println("ERROR FOR GOV VALUE: gov word is:"
									+ td.gov().value()
									+ ",but expected is from sentence:"
									+ gov.getWord());
				}
				dw.setGovernor(gov);

				PosWrapper dep;
				if (td.dep().index() == 0) {
					dep = depend_ROOT;
				} else {
					dep = tempSentence.get(td.dep().index() - 1);
				}
				dw.setDependent(dep);
				if (!td.dep().value().equals(dep.getWord())) {
					System.out
							.println("ERROR FOR DEP VALUE: dep word is:"
									+ td.dep().value()
									+ ",but expected is from sentence:"
									+ dep.getWord());
				}

				System.out.println("dependency relation: " + td.reln());
				ArrayList<DependencyWrapper> temp = depend_struct.get(td.reln()
						.toString());
				if (temp != null) {
					temp.add(dw);// store gov and dep under their relation key
				} else {
					System.out
							.println("ERROR FOR DEP RELN: In hash map do not exist allocated array list for dependency relation: "
									+ td.reln());
				}
			}
		}
		if (num_sent != 1)
			System.out
					.println("ERROR:Doslo je do neke greske jer je umesto samo jedne recenice prepoznato ukupno "
							+ num_sent
							+ " recenica koje je podeljeno na tokene iako sam mu ja poslao jednu recenicu kao argument");
	}

	/**
	 * Lexical similarity of two tokens based on their LEMMAS, using the
	 * normalized longest common contiguous substring:
	 * NMCLCSn(a, b) = |lcs(a, b)|^2 / (|a| * |b|).
	 * Returns 0 when either lemma is null or empty.
	 */
	public static double lexSimilarity(PosWrapper w_a, PosWrapper w_b) {
		String first = w_a.getLema();
		String second = w_b.getLema();
		if (first == null || second == null || first.length() == 0
				|| second.length() == 0) {
			return 0;
		}
		double lcs = StringUtils.longestCommonContiguousSubstring(first,
				second);
		return (lcs * lcs) / (first.length() * second.length());
	}

	/**
	 * Semantic similarity of two tokens via the WordNet Lin measure. A
	 * nonzero value is only possible when both tokens carry the same POS tag
	 * and that tag is a verb (WordNet POS "v") or a noun (WordNet POS "n");
	 * in every other case 0.0 is returned. Uses the surface WORD rather than
	 * the lemma -- see class note 1.
	 */
	public static double semSimilarity(PosWrapper a, PosWrapper b) {
		if (!a.getPos().equals(b.getPos())) {
			return 0.0;
		}
		// The third JWS argument selects the WordNet part of speech; max()
		// returns the highest Lin score over all sense pairs.
		for (String verbTag : pos_verbs) {
			if (a.getPos().equals(verbTag)) {
				return WN_lin.max(a.getWord(), b.getWord(), "v");
			}
		}
		for (String nounTag : pos_nouns) {
			if (a.getPos().equals(nounTag)) {
				return WN_lin.max(a.getWord(), b.getWord(), "n");
			}
		}
		return 0.0;
	}

	/**
	 * Cosine similarity between two token lists over the union of their
	 * lemmas. For every lemma in the union, v1 holds the best similarity
	 * against any token of st1 and v2 the best against any token of st2; the
	 * result is (v1 . v2) / (||v1|| * ||v2||), or 0.0 when either input list
	 * is empty or one of the vectors has zero norm (avoids NaN from 0/0).
	 *
	 * BUG FIX: in the original union construction the 'break' was NOT inside
	 * the if-body (missing braces, misleading indentation), so the inner loop
	 * always stopped after comparing with the FIRST union element only --
	 * duplicate lemmas were added whenever a repeated lemma was not the first
	 * element of the union.
	 *
	 * @param st1 tokens of the first sentence (one POS tag, or merged lists)
	 * @param st2 tokens of the second sentence
	 * @param isSemanticSimilarity true = use semSimilarity, false = lexSimilarity
	 * @return cosine similarity of the two max-similarity vectors
	 */
	public static double shallowSimTag(ArrayList<PosWrapper> st1,
			ArrayList<PosWrapper> st2, boolean isSemanticSimilarity) {

		int n = st1.size();
		int m = st2.size();
		if (m == 0 || n == 0)
			return 0.0;

		// union of LEMMAS (not of WORDS), built in a separate list so the
		// source lists are never modified while being iterated
		ArrayList<PosWrapper> st = new ArrayList<PosWrapper>();
		addIfLemmaAbsent(st, st1);
		addIfLemmaAbsent(st, st2);

		// v1[i] / v2[i] = best similarity of union element i against st1 / st2
		ArrayList<Double> v1 = bestSimilarityVector(st, st1,
				isSemanticSimilarity);
		ArrayList<Double> v2 = bestSimilarityVector(st, st2,
				isSemanticSimilarity);

		// calculate ||v1||
		double v1_value = 0;
		for (Double v : v1) {
			v1_value += v * v;
		}
		v1_value = Math.sqrt(v1_value);

		// calculate ||v2||
		double v2_value = 0;
		for (Double v : v2) {
			v2_value += v * v;
		}
		v2_value = Math.sqrt(v2_value);

		// calculate the dot product v1*v2
		double v_value = 0;
		for (int i = 0; i < v1.size(); i++) {
			v_value += v1.get(i) * v2.get(i);
		}

		// sim(st1,st2) = (v1*v2)/(||v1||*||v2||); guard the denominator
		if (v1_value * v2_value == 0) {
			return 0.0;
		}
		return v_value / (v1_value * v2_value);
	}

	/**
	 * Appends to {@code union} every token of {@code tokens} whose lemma is
	 * not already present in {@code union}.
	 */
	private static void addIfLemmaAbsent(ArrayList<PosWrapper> union,
			ArrayList<PosWrapper> tokens) {
		for (PosWrapper candidate : tokens) {
			boolean alreadyPresent = false;
			for (PosWrapper existing : union) {
				if (candidate.getLema().equals(existing.getLema())) {
					alreadyPresent = true;
					break;
				}
			}
			if (!alreadyPresent) {
				union.add(candidate);
			}
		}
	}

	/**
	 * For every token of {@code union}, computes the maximum semantic or
	 * lexical similarity against any token of {@code sentenceTokens}.
	 */
	private static ArrayList<Double> bestSimilarityVector(
			ArrayList<PosWrapper> union, ArrayList<PosWrapper> sentenceTokens,
			boolean isSemanticSimilarity) {
		ArrayList<Double> vector = new ArrayList<Double>();
		for (PosWrapper u : union) {
			double max_value = 0;
			for (PosWrapper token : sentenceTokens) {
				double value = isSemanticSimilarity ? semSimilarity(u, token)
						: lexSimilarity(u, token);
				if (value > max_value) {
					max_value = value;
				}
			}
			vector.add(max_value);
		}
		return vector;
	}

	/**
	 * Appends the "shallow" (POS based) attributes of one sentence pair to
	 * {@code out}, in exactly the order declared by the ARFF header:
	 * diff_Tag_&lt;p&gt; for every tag, diffNouns, diffVerbs,
	 * semSim_Tag_&lt;p&gt; for noun/verb tags, semSimNouns, semSimVerbs,
	 * lexSim_Tag_&lt;p&gt; for every tag, lexSimNouns, lexSimVerbs.
	 * Every value is followed by a comma.
	 */
	public static void calculateShallowAttrib(
			HashMap<String, ArrayList<PosWrapper>> sent1_pos,
			HashMap<String, ArrayList<PosWrapper>> sent2_pos, StringBuffer out) {

		// collect all nouns and all verbs of each sentence, needed for the
		// aggregated diffNouns/diffVerbs/semSim*/lexSim* attributes
		ArrayList<PosWrapper> sent1_nouns = new ArrayList<PosWrapper>();
		ArrayList<PosWrapper> sent2_nouns = new ArrayList<PosWrapper>();
		for (String nounTag : pos_nouns) {
			sent1_nouns.addAll(sent1_pos.get(nounTag));
			sent2_nouns.addAll(sent2_pos.get(nounTag));
		}
		ArrayList<PosWrapper> sent1_verbs = new ArrayList<PosWrapper>();
		ArrayList<PosWrapper> sent2_verbs = new ArrayList<PosWrapper>();
		for (String verbTag : pos_verbs) {
			sent1_verbs.addAll(sent1_pos.get(verbTag));
			sent2_verbs.addAll(sent2_pos.get(verbTag));
		}

		// diff_Tag_<p>: absolute count difference per POS tag
		for (String p : pos) {
			int diff = Math.abs(sent1_pos.get(p).size()
					- sent2_pos.get(p).size());
			out.append("" + diff + ",");
		}

		// diffNouns
		out.append("" + Math.abs(sent1_nouns.size() - sent2_nouns.size())
				+ ",");
		// diffVerbs
		out.append("" + Math.abs(sent1_verbs.size() - sent2_verbs.size())
				+ ",");

		// semSim_Tag_<p>: semantic similarity only for noun and verb tags
		for (String p : pos_verb_nouns) {
			double tag = shallowSimTag(sent1_pos.get(p), sent2_pos.get(p),
					true);
			out.append("" + cutDecimals(tag) + ",");
		}

		// semSimNouns
		out.append("" + cutDecimals(shallowSimTag(sent1_nouns, sent2_nouns,
				true)) + ",");
		// semSimVerbs
		out.append("" + cutDecimals(shallowSimTag(sent1_verbs, sent2_verbs,
				true)) + ",");

		// lexSim_Tag_<p>: lexical similarity per POS tag
		for (String p : pos) {
			double tag = shallowSimTag(sent1_pos.get(p), sent2_pos.get(p),
					false);
			out.append("" + cutDecimals(tag) + ",");
		}

		// lexSimNouns
		out.append("" + cutDecimals(shallowSimTag(sent1_nouns, sent2_nouns,
				false)) + ",");
		// lexSimVerbs
		out.append("" + cutDecimals(shallowSimTag(sent1_verbs, sent2_verbs,
				false)) + ",");
	}

	/**
	 * Similarity of two typed dependencies, combining governor and dependent
	 * similarity as sim(gov1, gov2) * 2^(sim(dep1, dep2) - 1).
	 * For semantic similarity the value is only computed when all four tokens
	 * (both governors and both dependents) carry noun or verb tags; otherwise
	 * 0.0 is returned. Lexical similarity is always computed.
	 */
	public static double sim_dep(DependencyWrapper d1, DependencyWrapper d2,
			boolean isSemanticSimilarity) {
		if (!isSemanticSimilarity) {
			// lexical similarity is defined for every POS tag
			double govSim = lexSimilarity(d1.getGovernor(), d2.getGovernor());
			double depSim = lexSimilarity(d1.getDependent(), d2.getDependent());
			return govSim * Math.pow(2, depSim - 1);
		}

		// semantic (deep) similarity: require all four tokens to be nouns or
		// verbs; each token matches at most one tag, so the counter reaches
		// 4 exactly when governor and dependent of BOTH dependencies qualify
		int numDiscovered = 0;
		for (String vn : pos_verb_nouns) {
			if (vn.equals(d1.getGovernor().getPos()))
				numDiscovered++;
			if (vn.equals(d2.getGovernor().getPos()))
				numDiscovered++;
			if (vn.equals(d1.getDependent().getPos()))
				numDiscovered++;
			if (vn.equals(d2.getDependent().getPos()))
				numDiscovered++;
		}
		if (numDiscovered != 4) {
			// NOTE(review): 0.0 is used as the neutral default here; the
			// original author flagged this choice for review (class note 3)
			return 0.0;
		}
		double govSim = semSimilarity(d1.getGovernor(), d2.getGovernor());
		double depSim = semSimilarity(d1.getDependent(), d2.getDependent());
		return govSim * Math.pow(2, depSim - 1);
	}

	/**
	 * Deep similarity between two lists of dependencies sharing a relation.
	 * Builds the m x n matrix of pairwise sim_dep scores, then greedily
	 * extracts the globally largest entry, zeroes its row and column, and
	 * repeats until no positive entry remains. The picked scores B are
	 * combined as sum(B) * (m + n) / (2 * m * n).
	 * Returns 0.0 when either list is empty.
	 */
	public static double deepSimTag(ArrayList<DependencyWrapper> sr1,
			ArrayList<DependencyWrapper> sr2, boolean isSemanticSimilarity) {

		int m = sr1.size();
		int n = sr2.size();
		if (m == 0 || n == 0)
			return 0.0;

		// pairwise similarities: rows index sr1, columns index sr2
		double[][] scores = new double[m][n];
		for (int i = 0; i < m; i++) {
			for (int j = 0; j < n; j++) {
				scores[i][j] = sim_dep(sr1.get(i), sr2.get(j),
						isSemanticSimilarity);
			}
		}

		// greedy matching: repeatedly take the largest remaining entry and
		// forbid its row and column, accumulating the picked values
		double pickedSum = 0;
		while (true) {
			double best = 0;
			int bestRow = -1;
			int bestCol = -1;
			for (int i = 0; i < m; i++) {
				for (int j = 0; j < n; j++) {
					if (scores[i][j] > best) {
						best = scores[i][j];
						bestRow = i;
						bestCol = j;
					}
				}
			}
			if (best == 0) {
				break;
			}
			pickedSum += best;
			for (int k = 0; k < n; k++)
				scores[bestRow][k] = 0;
			for (int k = 0; k < m; k++)
				scores[k][bestCol] = 0;
		}

		// m and n are both positive here, so 2*m*n cannot realistically be 0
		if (2 * m * n == 0) {
			return 0.0;
		}
		return pickedSum * (m + n) / (2 * m * n);
	}

	/**
	 * Appends the dependency based attributes of one sentence pair to
	 * {@code out}, in ARFF header order: diff_Dep_&lt;d&gt; for every
	 * relation, then semSim_Dep_&lt;d&gt;, then lexSim_Dep_&lt;d&gt;.
	 * Every value is followed by a comma.
	 */
	public static void calculateDeepAttrib(
			HashMap<String, ArrayList<DependencyWrapper>> sent1_depend,
			HashMap<String, ArrayList<DependencyWrapper>> sent2_depend,
			StringBuffer out) {
		// diff_Dep_<d>: absolute count difference per dependency relation
		for (String relation : depend) {
			int diff = Math.abs(sent1_depend.get(relation).size()
					- sent2_depend.get(relation).size());
			out.append("" + diff + ",");
		}

		// semSim_Dep_<d> (sim_dep itself restricts semantic scoring to
		// dependencies whose tokens are nouns or verbs)
		for (String relation : depend) {
			double tag = deepSimTag(sent1_depend.get(relation),
					sent2_depend.get(relation), true);
			out.append("" + cutDecimals(tag) + ",");
		}

		// lexSim_Dep_<d>
		for (String relation : depend) {
			double tag = deepSimTag(sent1_depend.get(relation),
					sent2_depend.get(relation), false);
			out.append("" + cutDecimals(tag) + ",");
		}
	}

	/**
	 * Appends the two general attributes to {@code out}: diff_All (absolute
	 * difference of the recognized-token counts) and overallLexsim (lexical
	 * similarity over all recognized tokens). Tokens with CD, punctuation or
	 * unknown tags never enter the pos maps (see unrollSentence), so they are
	 * automatically excluded here.
	 */
	public static void calculateGeneralAttrib(
			HashMap<String, ArrayList<PosWrapper>> sent1_pos,
			HashMap<String, ArrayList<PosWrapper>> sent2_pos, StringBuffer out) {

		// flatten the per-tag token lists of each sentence into one list
		ArrayList<PosWrapper> s1 = new ArrayList<PosWrapper>();
		for (ArrayList<PosWrapper> tagTokens : sent1_pos.values()) {
			s1.addAll(tagTokens);
		}
		ArrayList<PosWrapper> s2 = new ArrayList<PosWrapper>();
		for (ArrayList<PosWrapper> tagTokens : sent2_pos.values()) {
			s2.addAll(tagTokens);
		}

		// diff_All
		out.append("" + Math.abs(s1.size() - s2.size()) + ",");

		// overallLexsim: lexical similarity between all words
		out.append("" + cutDecimals(shallowSimTag(s1, s2, false)) + ",");
	}

	/**
	 * Loads config/conf.properties and fills the configuration fields
	 * (NAME_OF_INPUT_FILE, NAME_OF_OUTPUT_FILE, WORD_NET_DIRECTORY,
	 * WORD_NET_VERSION, DOUBLE_NUM_DECIMALS) and builds {@code out_DF}, the
	 * DecimalFormat used by cutDecimals(). Prints an error message when the
	 * file is missing or unreadable.
	 *
	 * BUG FIXES: the FileInputStream was never closed (resource leak), and
	 * the DecimalFormat used the default locale, so on locales with ',' as
	 * decimal separator cutDecimals()'s Double.valueOf() would throw
	 * NumberFormatException; US symbols are now forced.
	 */
	public static void read_config() {
		Properties config = new Properties();
		FileInputStream in = null;
		try {
			in = new FileInputStream("config/conf.properties");
			config.load(in);
			NAME_OF_INPUT_FILE = config.getProperty("NAME_OF_INPUT_FILE");
			NAME_OF_OUTPUT_FILE = config.getProperty("NAME_OF_OUTPUT_FILE");
			WORD_NET_DIRECTORY = config.getProperty("WORD_NET_DIRECTORY");
			WORD_NET_VERSION = config.getProperty("WORD_NET_VERSION");
			DOUBLE_NUM_DECIMALS = Integer.parseInt(config
					.getProperty("DOUBLE_NUM_DECIMALS"));
			// build the output decimal format, e.g. "#.###" for 3 decimals
			StringBuilder format = new StringBuilder("#.");
			for (int i = 0; i < DOUBLE_NUM_DECIMALS; i++) {
				format.append('#');
			}
			// force '.' as decimal separator regardless of the JVM locale
			out_DF = new DecimalFormat(format.toString(),
					new DecimalFormatSymbols(Locale.US));
		} catch (IOException ex) {
			System.out
					.println("In config directory must exist file conf.properties with value of attributes!");
		} finally {
			if (in != null) {
				try {
					in.close();
				} catch (IOException ignored) {
					// nothing sensible to do if closing the config file fails
				}
			}
		}
	}
	/**
	 * Initializes the WordNet similarity library (JWS) and stores the Lin
	 * measure in {@code WN_lin}. The WordNet dictionary location and version
	 * come from the configuration (WORD_NET_DIRECTORY / WORD_NET_VERSION), so
	 * no environment variables need to be set; JWS expects the corresponding
	 * information-content files next to the dictionary and uses its default
	 * IC file (ic-semcor.dat).
	 */
	public static void load_Word_Net() {
		JWS wordNet = new JWS(WORD_NET_DIRECTORY, WORD_NET_VERSION);
		WN_lin = wordNet.getLin();
	}

	/**
	 * Writes the ARFF header (@relation, all @ATTRIBUTE lines and the @data
	 * marker) into {@code out}. The attribute order must match exactly the
	 * order in which calculateShallowAttrib, calculateDeepAttrib and
	 * calculateGeneralAttrib append their values, with the class attribute
	 * Quality last.
	 */
	public static void prepare_header_of_arff_file(StringBuffer out) {

		out.append("@relation " + NAME_OF_OUTPUT_FILE).append("\n");

		// shallow attributes, per POS tag and aggregated over nouns/verbs
		for (String p : pos) {
			out.append("@ATTRIBUTE diff_Tag_" + p + "	numeric").append("\n");
		}
		out.append("@ATTRIBUTE diffNouns	numeric").append("\n");
		out.append("@ATTRIBUTE diffVerbs	numeric").append("\n");

		for (String p : pos_verb_nouns) {
			out.append("@ATTRIBUTE semSim_Tag_" + p + "	REAL").append("\n");
		}
		out.append("@ATTRIBUTE semSimNouns	REAL").append("\n");
		out.append("@ATTRIBUTE semSimVerbs	REAL").append("\n");

		for (String p : pos) {
			out.append("@ATTRIBUTE lexSim_Tag_" + p + "	REAL").append("\n");
		}
		out.append("@ATTRIBUTE lexSimNouns	REAL").append("\n");
		out.append("@ATTRIBUTE lexSimVerbs	REAL").append("\n");

		// deep (dependency) attributes, per relation
		for (String d : depend) {
			out.append("@ATTRIBUTE diff_Dep_" + d + "	numeric").append("\n");
		}
		for (String d : depend) {
			out.append("@ATTRIBUTE semSim_Dep_" + d + "	REAL").append("\n");
		}
		for (String d : depend) {
			out.append("@ATTRIBUTE lexSim_Dep_" + d + "	REAL").append("\n");
		}

		// general attributes and the class attribute
		out.append("@ATTRIBUTE diff_All	numeric").append("\n");
		out.append("@ATTRIBUTE overallLexsim	REAL").append("\n");
		out.append("@ATTRIBUTE Quality	{0,1}").append("\n");

		out.append("\n");
		out.append("@data").append("\n");
	}
	/**
	 * Rounds a double to the configured number of decimals by formatting it
	 * with {@code out_DF} and parsing the text back.
	 *
	 * BUG FIX: the original used Double.valueOf(out_DF.format(input)), which
	 * throws NumberFormatException on locales whose decimal separator is not
	 * '.' (e.g. "0,5"). Parsing with the same DecimalFormat that produced the
	 * text is locale-safe.
	 */
	public static double cutDecimals(double input) {
		String formatted = out_DF.format(input);
		try {
			return out_DF.parse(formatted).doubleValue();
		} catch (ParseException e) {
			// cannot happen: we parse exactly the text out_DF just produced
			throw new IllegalStateException(
					"Cannot re-parse formatted value: " + formatted, e);
		}
	}

	/**
	 * Entry point. Reads the tab-separated sentence-pair file
	 * input/NAME_OF_INPUT_FILE (the first line is a description and is
	 * skipped), computes the shallow, deep and general similarity attributes
	 * for every pair, and writes one ARFF row per pair to
	 * output/NAME_OF_OUTPUT_FILE.arff.
	 *
	 * BUG FIX: after writing the header the buffer was cleared with
	 * delete(0, length() - 1), which left the final newline in the buffer and
	 * prepended it to the first data row. Resources are now also released in
	 * a finally block, and malformed input lines are skipped instead of
	 * crashing with ArrayIndexOutOfBoundsException.
	 */
	public static void main(String[] args) {

		read_config();// read configuration values needed for WordNet and I/O
		load_Word_Net();// load WordNet (e.g. version 3.0)

		// POS tag -> tokens, for each of the two sentences of a pair
		HashMap<String, ArrayList<PosWrapper>> sent1_pos = new HashMap<String, ArrayList<PosWrapper>>();
		HashMap<String, ArrayList<PosWrapper>> sent2_pos = new HashMap<String, ArrayList<PosWrapper>>();

		// dependency relation -> dependencies, for each of the two sentences
		HashMap<String, ArrayList<DependencyWrapper>> sent1_depend = new HashMap<String, ArrayList<DependencyWrapper>>();
		HashMap<String, ArrayList<DependencyWrapper>> sent2_depend = new HashMap<String, ArrayList<DependencyWrapper>>();

		// initialization of hash maps for sentences
		for (String p : pos) {
			sent1_pos.put(p, new ArrayList<PosWrapper>());
			sent2_pos.put(p, new ArrayList<PosWrapper>());
		}
		for (String d : depend) {
			sent1_depend.put(d, new ArrayList<DependencyWrapper>());
			sent2_depend.put(d, new ArrayList<DependencyWrapper>());
		}

		BufferedReader reader = null;
		PrintStream out = null;
		try {
			reader = new BufferedReader(new FileReader("input/"
					+ NAME_OF_INPUT_FILE));
			String oneLine = reader.readLine(); // ignore first line with
												// description
			oneLine = reader.readLine();

			out = new PrintStream(new FileOutputStream("output/"
					+ NAME_OF_OUTPUT_FILE + ".arff"));
			StringBuffer outputStringBuffer = new StringBuffer();

			prepare_header_of_arff_file(outputStringBuffer);
			out.println(outputStringBuffer.toString());
			// clear the String buffer completely (the original off-by-one
			// delete(0, length() - 1) kept the trailing newline)
			outputStringBuffer.setLength(0);

			while (oneLine != null) {
				String[] tokens = oneLine.split("\\t");
				// tokens[0] -> semantic value (Quality, from the annotators)
				// tokens[1] -> ID of first sentence
				// tokens[2] -> ID of second sentence
				// tokens[3] -> first sentence
				// tokens[4] -> second sentence

				// skip malformed lines instead of crashing on tokens[3]/[4]
				if (tokens.length < 5) {
					System.out.println("Skipping malformed input line: "
							+ oneLine);
					oneLine = reader.readLine();
					continue;
				}

				// clear per-tag and per-relation lists from the previous pair
				for (String p : pos) {
					sent1_pos.get(p).clear();
					sent2_pos.get(p).clear();
				}
				for (String d : depend) {
					sent1_depend.get(d).clear();
					sent2_depend.get(d).clear();
				}

				unrollSentence(tokens[3], sent1_pos, sent1_depend);
				unrollSentence(tokens[4], sent2_pos, sent2_depend);

				calculateShallowAttrib(sent1_pos, sent2_pos, outputStringBuffer);
				calculateDeepAttrib(sent1_depend, sent2_depend,
						outputStringBuffer);
				// count the words and compute their lexical similarity
				calculateGeneralAttrib(sent1_pos, sent2_pos, outputStringBuffer);

				// last attribute: Quality (the similarity value assigned by
				// the human annotators)
				outputStringBuffer.append(tokens[0]);

				out.println(outputStringBuffer.toString());
				outputStringBuffer.setLength(0);// clear String buffer

				oneLine = reader.readLine();
			}
			System.out.println("End!");

		} catch (IOException e) {
			e.printStackTrace();
		} finally {
			// release the file handles even when an exception occurred
			if (reader != null) {
				try {
					reader.close();
				} catch (IOException ignored) {
					// nothing sensible to do if closing the input fails
				}
			}
			if (out != null) {
				out.close();
			}
		}
	}
}
