package pipeline;

import java.io.File;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.List;
import java.util.Map.Entry;

import abs.syn.Dependency;
import abs.syn.DependencyPath;
import abs.syn.Parse;

import setting.BioNLPSettings;
import util.io.FileUtil;
import classifier.SVMWrapper;
import def.BioNLPDocument;
import def.Event;
import def.EventType;
import def.Protein;
import def.TSpan;

/**
 * Event-trigger detection using an SVM classifier (SVM-light style sparse files).
 *
 * Pipeline: {@link #learn()} builds positive/negative token examples from a
 * gold corpus via {@link #prepareSVMData}, assigns integer feature ids
 * (persisted to the "triggerFeatureMap" setting), and trains/evaluates the
 * model; {@link #detectTriggers} applies the trained model to every token of
 * a new document.  Feature extraction is shared via {@link #extractFeature}.
 */
public class TriggerDetectionSVM extends TriggerDetection {

	protected TriggerDetectionSVM(){
		super();
	}

	/**
	 * Initializes the detector and loads the feature-name -&gt; feature-id map
	 * (one "name\tid" pair per line) written by {@link #learn()}.
	 */
	@Override public void init(){
		super.init();
		// read the dictionary
		String[] lines = FileUtil.getTextFromFile(BioNLPSettings.getValue("triggerFeatureMap")).split("\n");
		// NOTE(review): length!=1 presumably guards against an empty/missing map
		// file that splits into a single line — confirm FileUtil's behavior.
		if (lines.length!=1){
			System.out.println("reading ... "+lines.length +" feature map");
			for (String line: lines){
				String[] pair = line.split("\t");
				str2id.put(pair[0], Integer.parseInt(pair[1]));
			}
		}
	}

	/**
	 * Builds one candidate span per token (plus an extra candidate for each
	 * out-of-dictionary hyphenated token, split at the first hyphen), extracts
	 * features, classifies all candidates with the pre-trained SVM model, and
	 * returns the spans scoring above the SVM_TRIGGER_THRESHOLD setting.
	 *
	 * @param doc document providing text, parses and sentence offsets
	 * @return predicted trigger spans
	 */
	@Override public ArrayList<TSpan> detectTriggers(BioNLPDocument doc){
		ArrayList<TSpan> predictions  = new ArrayList<TSpan>(), alltspans = new ArrayList<TSpan>();
		ArrayList<Hashtable<String, Integer>> examples = new ArrayList<Hashtable<String, Integer>>();

		// for each word, build a candidate span + feature vector
		for (int i = 0; i < doc.parses.length; i++){
			for (int j = 0; j < doc.parses[i].words.length; j++){
				TSpan newSpan = new TSpan();
				// parse offsets are sentence-relative; shift by the sentence start
				newSpan.startIdx = doc.parses[i].startPos[j]+doc.aText.sentStartPos[i];
				newSpan.endIdx = doc.parses[i].endPos[j]+doc.aText.sentStartPos[i];
				newSpan.fileId = doc.fileId;
				newSpan.text = doc.text.text.substring(newSpan.startIdx, newSpan.endIdx);
				checkValidTrigger(doc,newSpan);
				// checkValidTrigger signals rejection by setting startIdx to -1
				if (newSpan.startIdx == -1){
					continue;
				}

				newSpan.text = doc.text.text.substring(newSpan.startIdx, newSpan.endIdx);
				// hyphenated tokens not in the dictionary: also try the part
				// before the first hyphen as its own candidate, and keep the
				// part after the hyphen in newSpan
				if ( newSpan.text.contains("-") && !dict.contains(newSpan.text.replace("-", ""))){
					int separator = newSpan.text.indexOf("-");
					if (separator != newSpan.text.length() - 1 && separator != 0){
						TSpan newSpan2 = new TSpan();
						newSpan2.startIdx = newSpan.startIdx;
						newSpan2.endIdx = newSpan.startIdx + separator;
						newSpan2.fileId = doc.fileId;
						newSpan2.text = doc.text.text.substring(newSpan2.startIdx, newSpan2.endIdx);
						newSpan.startIdx += separator + 1;
						newSpan.text = doc.text.text.substring(newSpan.startIdx, newSpan.endIdx);

						Hashtable<String, Integer> example = extractFeature(doc, i,j, newSpan2.text);
						if (example!=null){
							examples.add(example);
							alltspans.add(newSpan2);
						}
					}
				}
				if (newSpan.text.length() > 0){
					Hashtable<String, Integer> example = extractFeature(doc, i,j, newSpan.text);
					if (example!=null){
						examples.add(example);
						alltspans.add(newSpan);
					}
				}
			}
		}
		// serialize in SVM-light sparse format; class is unknown at prediction
		// time, so examples without the NEG_CLASS marker default to +1
		StringBuffer sb = new StringBuffer();
		for (Hashtable<String ,Integer> ex: examples){
			ArrayList<Integer> features = new ArrayList<Integer>();
			boolean tclass = true;
			for (String key: ex.keySet()){
				if (key.equals("NEG_CLASS"))
					tclass = false;
				else{
					// features unseen at training time have no id and are dropped
					if (str2id.containsKey(key)){
						features.add(str2id.get(key));
					}
				}
			}
			Collections.sort(features); // SVM-light requires ascending feature ids
			if (tclass)
				sb.append("+1");
			else
				sb.append("-1");
			for (int i = 0; i < features.size(); i++){
				sb.append(" "+features.get(i)+":1");
			}
			sb.append("\n");
		}
		FileUtil.writeTextToFile(sb.toString(), "examples.svm");
		// apply classifier
		SVMWrapper svm = new SVMWrapper();
		long startTime = System.currentTimeMillis();
		svm.test("examples.svm", BioNLPSettings.getValue("TRIGGER_MODEL"), "svm_predictions");
		System.err.println("launching svm ... took "+(System.currentTimeMillis() - startTime) + " milliSeconds");
		// process the predictions (one score per line, aligned with alltspans)
		String[] scores = FileUtil.getTextFromFile("svm_predictions").split("\n");
		if (alltspans.size() != scores.length){
			System.err.println("[detectTriggerSVM] TSPAN.size != scores.length");
		}
		// FIX: iterate only over the overlap so the size mismatch warned about
		// above cannot throw IndexOutOfBoundsException on alltspans.get(i)
		int n = Math.min(scores.length, alltspans.size());
		for (int i = 0; i < n; i++){
			if (Double.parseDouble(scores[i])> BioNLPSettings.getFloatValue("SVM_TRIGGER_THRESHOLD")){
				predictions.add(alltspans.get(i));
				System.err.println("[detectTriggerSVM] @"+i+" "+alltspans.get(i)+ " "+scores[i]);
			}
		}
		return predictions;
	}

	/**
	 * Extracts the feature map for one candidate trigger token.
	 *
	 * Features: lowercased token and its affix-stripped stem; linear context
	 * (up to 2 tokens on each side, 1 on the left when the token itself
	 * overlaps a protein); direct dependency neighbors; and shortest
	 * dependency paths of length &lt;= 3 to every other token.
	 * (Capitalization, punctuation, numeric, char n-grams, gazetteer presence
	 * are deliberately ignored.)
	 *
	 * @param doc    the document (text, parses, protein mentions)
	 * @param sentId sentence index into doc.parses
	 * @param wordId 0-based token index within the sentence
	 * @param word   surface form for the lexical features (may differ from the
	 *               parse token, e.g. for split hyphenated words)
	 * @return the feature map (feature name -&gt; 1)
	 */
	public static Hashtable<String, Integer> extractFeature(BioNLPDocument doc, int sentId, int wordId, String word){
		int i = sentId, j = wordId;

		Hashtable<String, Integer> example = new Hashtable<String, Integer>();

		// lexical features
		example.put("TOKEN_"+word.toLowerCase(),1);
		// I use stripAffixes here although it has only a very simple prefix stripping process
		example.put("TOKEN_AFFIX_STEM_"+stemmer.stripAffixes(word.toLowerCase()).toLowerCase(),1);

		Parse parse = doc.parses[i];
		ArrayList<Integer> words = new ArrayList<Integer>();
		ArrayList<Integer> proteins = new ArrayList<Integer>();

		// 1-based index of the candidate word, and of every token overlapping
		// a protein mention in this sentence
		words.add(j+1);
		for (int k = 0; k < parse.leaves.size(); k++){
			for (Protein protein: doc.proteins){
				if (doc.getSentenceIdByPos(protein.tspan.startIdx) == i){
					if (parse.startPos[k] <= protein.tspan.startIdx-doc.aText.sentStartPos[i]){
						if (parse.endPos[k] >=  protein.tspan.startIdx-doc.aText.sentStartPos[i]){
							proteins.add(k+1);
						}
					}
					else{
						if (parse.startPos[k] < protein.tspan.endIdx-doc.aText.sentStartPos[i]){
							proteins.add(k+1);
						}
					}
				}
			}
		}

		// linear context
		int linear_context_size = 2;
		int startWord = j, endWord = j;
		if (proteins.contains(j+1)){
			example.put("LINEAR_CONTAIN_PROTEIN", 1);
			linear_context_size = 1; // shrink the left window for protein tokens
		}
		for (int ii = 0; ii < linear_context_size && startWord - ii > 0 ; ii ++){
			// FIX: original tested proteins.contains(endWord+ii+1+1) — the token
			// AFTER the window, copy-pasted from the suffix loop below — while
			// emitting a feature for the token BEFORE; test the preceding token.
			if (proteins.contains(startWord-ii)){
				example.put("LINEAR_PREFIX_PROTEIN", 1);
			}
			else{
				example.put("LINEAR_PREFIX_"+parse.words[startWord-ii-1].toLowerCase(), 1);
			}
		}
		linear_context_size = 2;
		for (int ii = 0; ii < linear_context_size && endWord + ii +1 < parse.words.length ; ii ++){
			if (proteins.contains(endWord+ii+1+1)){
				example.put("LINEAR_SUFFIX_PROTEIN", 1);
			}
			else{
				example.put("LINEAR_SUFFIX_"+parse.words[endWord+ii+1].toLowerCase(), 1);
			}
		}

		// direct dependency neighbors of the candidate word
		for (Dependency dep : parse.dependencies){
			if (words.contains(dep.govIdx)){
				// candidate governs dep.depIdx; feature names the dependent
				if (proteins.contains(dep.depIdx)){
					example.put("DEP_GOV_PROTEIN", 1);
				}else{
					example.put("DEP_GOV_"+parse.words[dep.depIdx-1],1);
				}
			}
			else if (words.contains(dep.depIdx)){
				// candidate is governed by dep.govIdx; feature names the governor
				if (proteins.contains(dep.govIdx)){
					example.put("DEP_DEP_PROTEIN", 1);
				}else{
					// FIX: original used dep.depIdx-1, i.e. the candidate word
					// itself (this branch was entered because words contains
					// dep.depIdx); the feature is meant to name the governor,
					// symmetric to the DEP_GOV_ branch above.
					example.put("DEP_DEP_"+parse.words[dep.govIdx-1],1);
				}
			}
		}

		//TODO :frequency features
		// named entities in the sentence?  AND in a linear window
		// also the bag-of-word counts

		// dependency-path features:
		// shortest paths of depth <= 3, path tokens, and typed path prefixes
		for (int k = 0; k < parse.leaves.size(); k++){
			if (words.contains(k+1)) 
				continue;
			List<DependencyPath> paths = parse.getDependencyPath(parse.startPos[j], parse.endPos[j], parse.startPos[k], parse.endPos[k]);
			ArrayList<DependencyPath> shortestPaths = new ArrayList<DependencyPath>();
			// FIX: original had "int maxLength = 0" with "size() < maxLength",
			// which is never true, so the clear-and-replace branch was dead code
			// and EVERY path of length <= 3 was kept; track the minimum length
			// so shortestPaths actually holds only the shortest paths.
			int minLength = Integer.MAX_VALUE;
			if (paths.size() == 0){
				example.put("DEP_NOT_FOUND", 1);
			}
			for (DependencyPath deppath:paths){
				if (deppath.deps.size() <= 3){
					if (deppath.deps.size() < minLength){
						minLength = deppath.deps.size();
						shortestPaths.clear();
						shortestPaths.add(deppath);
					}
					else if (deppath.deps.size() == minLength){
						shortestPaths.add(deppath);
					}
				}
			}

			for (DependencyPath dp : shortestPaths){
				String pathStr = "";
				for (int jj = 0;jj < dp.deps.size(); jj++){
					String str = null;
					// label each path node: PROTEIN, or the word at the far
					// end of the edge (direction given by isLeftArgument)
					if (proteins.contains(dp.deps.get(jj).depIdx)){
						str = "PROTEIN";
					}
					else{
						if (dp.isLeftArgument.get(jj)){
							str = parse.words[dp.deps.get(jj).depIdx-1].toLowerCase();
						}
						else{
							str = parse.words[dp.deps.get(jj).govIdx-1].toLowerCase();
						}
					}
					example.put("DEP_PATH_TOKEN_"+str, 1);
					pathStr+= "_"+dp.deps.get(jj).name+":"+str;
					// every typed prefix of the path is its own feature
					example.put("DEP_PATH"+pathStr, 1);
				}
			}
		}
		return example;
	}

	/**
	 * Builds SVM examples from a gold-annotated corpus directory.
	 * Each gold trigger token yields a positive example; multi-word triggers
	 * are first narrowed to their most type-specific word.  Optionally every
	 * remaining token (not a trigger and not a protein) yields a negative
	 * example, marked with the NEG_CLASS feature.
	 * Reference: http://www.aclweb.org/anthology-new/W/W09/W09-1402.pdf
	 *
	 * @param dataPath directory containing the *.txt documents + annotations
	 * @param parsePath directory containing the parses (currently unused here;
	 *        kept for interface compatibility)
	 * @param includeNegative whether to also emit negative examples
	 * @return feature maps, one per example
	 */
	public ArrayList<Hashtable<String, Integer>> prepareSVMData(String dataPath, String parsePath, boolean includeNegative){
		//FIXME linear and dep context + named entity
		String path = dataPath;
		ArrayList<Hashtable<String, Integer>> examples = new ArrayList<Hashtable<String, Integer>>();
		for (String filename :  new File(path).list()){
			if (filename.endsWith(".txt")){
				String prefix = filename.replace(".txt", "");
				System.err.println("Starting "+prefix);

				BioNLPDocument doc = new BioNLPDocument(path, prefix);
				if (!doc.valid || doc.events==null)
					continue;
				HashSet<TSpan> visited = new HashSet<TSpan>();
				// per-sentence 1-based token indices that belong to a gold
				// trigger or a protein; these never become negatives
				ArrayList<Integer>[] positive = new ArrayList[doc.parses.length];
				for (int i = 0; i < doc.parses.length; i++){
					positive[i] = new ArrayList<Integer>();
				}

				System.err.println(" including positives "+doc.events.size());
				for (Event e: doc.events.values()){
					System.err.println(" including pos_event");
					if (visited.contains(e.tspan)){
						continue;
					}
					TSpan ts = e.tspan;

					// Multi-word triggers: split on spaces (and hyphens inside
					// words) and keep the word observed with the fewest distinct
					// event types in the freq tables (most type-specific one).
					if (ts.text.contains(" ")){
						String[] items = ts.text.toLowerCase().split(" ");
						int s = ts.startIdx;
						ArrayList<Integer> wordStart = new ArrayList<Integer>(), wordEnd = new ArrayList<Integer>();
						ArrayList<Integer> variations = new ArrayList<Integer>();
						for (int i = 0; i < items.length; i++){
							if (freq.get(ts.textType.toString()).containsKey(items[i])){
								// count how many event types this word occurs with
								int c = 0; 
								for (EventType et: EventType.values()){
									if (freq.get(et.toString()).containsKey(items[i])){
										c++;
									}
								}
								wordStart.add(s);
								wordEnd.add(s+items[i].length());
								variations.add(c);
							}
							else{
								// check if containing hyphens
								if (items[i].contains("-")){
									String[] pair = items[i].split("-");
									if (freq.get(ts.textType.toString()).containsKey(pair[0])){
										int c = 0;
										for (EventType et: EventType.values()){
											if (freq.get(et.toString()).containsKey(pair[0])){
												c++;
											}
										}
										wordStart.add(s);
										wordEnd.add(s+pair[0].length());
										variations.add(c);
									}

									if (pair.length>1&&freq.get(ts.textType.toString()).containsKey(pair[1])){
										int c = 0;
										for (EventType et: EventType.values()){
											if (freq.get(et.toString()).containsKey(pair[1])){
												c++;
											}
										}
										wordStart.add(s+pair[0].length()+1);
										wordEnd.add(s+pair[0].length()+1+pair[1].length());
										variations.add(c);
									}
								}
							}
							s+=items[i].length()+1; // +1 for the split-away space
						}
						if (wordStart.size() == 0){
							System.err.println("[SVM_PREPARE] MULTIWORD CAN'T SPLIT : "+ts.text);
						}
						else{
							// pick up the least variational trigger words
							int minIdx = -1, min = 10;
							for (int i = 0; i < wordStart.size(); i++){
								if (variations.get(i) >0 && variations.get(i) < min){
									min = variations.get(i);
									minIdx = i;
								}
							}
							System.err.println("[SVM_PREPARE] old ts = "+ts);
							// FIX: the original called wordStart.get(minIdx)
							// unconditionally (in two byte-identical if/else
							// branches), throwing IndexOutOfBoundsException
							// whenever every candidate had variations == 0 and
							// minIdx stayed -1; keep ts unchanged in that case.
							if (minIdx != -1){
								ts.startIdx = wordStart.get(minIdx);
								ts.endIdx = wordEnd.get(minIdx);
								ts.text = doc.text.text.substring(ts.startIdx, ts.endIdx);
							}
							System.err.println("[SVM_PREPARE] new ts = "+ts);
						}
					}
					// ***END SPLITTING TRIGGERS***

					int sentid = doc.getSentenceIdByPos(ts.startIdx);

					Parse parse = doc.parses[sentid];

					// collect the trigger's token indices and the protein
					// token indices (both 1-based) for this sentence
					ArrayList<Integer> words = new ArrayList<Integer>(), proteins = new ArrayList<Integer>();
					for (int i = 0; i < parse.leaves.size(); i++){
						if (words.contains(i+1)) continue;
						if (parse.startPos[i] <= ts.startIdx-doc.aText.sentStartPos[sentid]){
							if (parse.endPos[i] >= ts.startIdx-doc.aText.sentStartPos[sentid]){
								words.add(i+1);
							}
						}
						else{
							if (parse.startPos[i] < ts.endIdx-doc.aText.sentStartPos[sentid]){ 
								// FIXME: ??? logic right?
								words.add(i+1);
							}
						}
						for (Protein protein: doc.proteins){
							if (doc.getSentenceIdByPos(protein.tspan.startIdx) == sentid){
								if (parse.startPos[i] <= protein.tspan.startIdx-doc.aText.sentStartPos[sentid]){
									if (parse.endPos[i] >=  protein.tspan.startIdx-doc.aText.sentStartPos[sentid]){
										proteins.add(i+1);
									}
								}
								else{
									if (parse.startPos[i] < protein.tspan.endIdx-doc.aText.sentStartPos[sentid]|| parse.endPos[i] <= protein.tspan.endIdx-doc.aText.sentStartPos[sentid]){
										proteins.add(i+1);
									}
								}
							}
						}
					}
					positive[sentid].addAll(words);
					positive[sentid].addAll(proteins);
					// sanity check: tokenization must align with the trigger text
					String[] items = ts.text.split("(\\s|\\-)+");
					if (words.size() != items.length){
						System.err.println("[SVM_PREPARE]"+doc.fileId+"  not match   "+ e);
						System.err.println("[SVM_PREPARE] words = "+words);
						System.err.println("[SVM_PREPARE] items = "+items.length);
						System.err.println("[SVM_PREPARE] fixing... ");
						// retry without splitting on hyphens
						items = ts.text.split("(\\s)+");
						if (words.size()==items.length){
							System.err.println("[SVM_PREPARE] fixed... ");
						}
						else{
							System.err.println("[SVM_PREPARE] fixing failed... items = "+items.length );
							continue;
						}
					}

					for (int j = 0; j < items.length; j++){
						Hashtable<String, Integer> example = extractFeature(doc, sentid, words.get(j) - 1, doc.parses[sentid].words[words.get(j) - 1] );
						System.err.println("adding positive");
						visited.add(e.tspan);
						// SRC_ feature records which token the example came from
						example.put("SRC_"+filename+":"+sentid+":"+(words.get(j) - 1)+":"+doc.parses[sentid].words[words.get(j) - 1],1);
						examples.add(example);
					}

				}
				System.err.println(" including negatives");
				if (includeNegative){
					for (int i = 0; i < doc.parses.length; i++){
						for (int j = 0; j < doc.parses[i].words.length; j++){
							if (!positive[i].contains(j+1)){
								Hashtable<String, Integer> example = extractFeature(doc, i, j, doc.parses[i].words[j]);
								if (example != null){ 
									example.put("NEG_CLASS", 1);
									example.put("SRC_"+filename+":"+i+":"+j+":"+doc.parses[i].words[j], 1);
									examples.add(example);
								}
							}
						}
					}
				}
			}
		}
		return examples;
	}

	/**
	 * Trains the trigger SVM: builds train/dev examples, assigns feature ids,
	 * writes (or reloads) the feature map, dumps both sets in SVM-light
	 * format, trains a model if none exists, and evaluates on the dev set.
	 */
	public void learn(){
		System.err.println("stuffing training data");
		str2id.clear();
		BioNLPSettings.init("train.conf");
		ArrayList<Hashtable<String, Integer>> train = prepareSVMData(BioNLPSettings.getValue("dataPath"), BioNLPSettings.getValue("parsePath"), true);
		System.err.println("stuffing dev data");
		BioNLPSettings.init("dev.conf");
		ArrayList<Hashtable<String, Integer>> test = prepareSVMData(BioNLPSettings.getValue("dataPath"), BioNLPSettings.getValue("parsePath"), true);
		System.err.println("encoding...");
		// assign ids to dev features first, then any remaining train features
		for (Hashtable<String ,Integer> ex: test){
			for (String key: ex.keySet()){
				if (!str2id.containsKey(key)){
					str2id.put(key, str2id.size()+1);
				}
			}
		}
		System.err.println("encoding2...");
		for (Hashtable<String ,Integer> ex: train){
			for (String key: ex.keySet()){
				if (!str2id.containsKey(key)){
					str2id.put(key, str2id.size()+1);
				}
			}
		}


		// feature dictionary: write it on the first run, otherwise reload the
		// existing map so ids stay compatible with an already-trained model
		BioNLPSettings.init("train.conf");
		StringBuffer sb = new StringBuffer();
		if (!new File(BioNLPSettings.getValue("triggerFeatureMap")).exists()){
			for (Entry<String, Integer> entry: str2id.entrySet()){
				sb.append(entry.getKey()+"\t"+entry.getValue()+"\n");
			}
			FileUtil.writeTextToFile(sb.toString(), BioNLPSettings.getValue("triggerFeatureMap"));
		}
		else{
			// FIX: original read the literal file name "triggerFeatureMap"
			// instead of the configured path whose existence was just checked
			String[] lines = FileUtil.getTextFromFile(BioNLPSettings.getValue("triggerFeatureMap")).split("\n");
			System.out.println("reading ... "+lines.length +" feature map");
			for (String line: lines){
				String[] pair = line.split("\t");
				str2id.put(pair[0], Integer.parseInt(pair[1]));
			}
		}

		System.err.println("printing test...");
		sb = new StringBuffer();
		for (Hashtable<String ,Integer> ex: test){
			ArrayList<Integer> features = new ArrayList<Integer>();
			boolean tclass = true;
			for (String key: ex.keySet()){
				if (key.equals("NEG_CLASS"))
					tclass = false;
				else
					// NOTE(review): dev keys were all assigned ids above, but if
					// the map was reloaded from file the in-memory ids may clash
					// with the persisted ones — verify on re-runs.
					features.add(str2id.get(key));
			}
			Collections.sort(features);
			if (tclass)
				sb.append("+1");
			else
				sb.append("-1");
			for (int i = 0; i < features.size(); i++){
				sb.append(" "+features.get(i)+":1");
			}
			sb.append("\n");
		}
		FileUtil.writeTextToFile(sb.toString(), "test_trigger.svm");
		System.err.println("printing train...");
		sb = new StringBuffer();
		for (Hashtable<String ,Integer> ex: train){
			ArrayList<Integer> features = new ArrayList<Integer>();
			boolean tclass = true;
			for (String key: ex.keySet()){
				if (key.equals("NEG_CLASS"))
					tclass = false;
				else{
					if (str2id.containsKey(key))
						features.add(str2id.get(key));
				}
			}
			Collections.sort(features);
			if (tclass)
				sb.append("+1");
			else
				sb.append("-1");
			for (int i = 0; i < features.size(); i++){
				sb.append(" "+features.get(i)+":1");
			}
			sb.append("\n");
		}
		FileUtil.writeTextToFile(sb.toString(), "train_trigger.svm");
		
		// training svm models (skipped if a model file already exists)
		SVMWrapper svm = new SVMWrapper();
		System.err.println("[TRG_Learn]training svm");
		if (!new File(BioNLPSettings.getValue("TRIGGER_MODEL")).exists()){
			svm.train("train_trigger.svm", BioNLPSettings.getValue("TRIGGER_MODEL"));
		}
		else{
			System.err.println("[TRG_Learn]training svm skipped");
		}
		
		System.err.println("[TRG_Learn]testing svm");
		double[] scores = svm.test("test_trigger.svm", BioNLPSettings.getValue("TRIGGER_MODEL"), "test_trigger_predictions");
		System.err.println("precision = "+scores[0]);
		System.err.println("recall = "+scores[1]);
		System.err.println("f1 = "+scores[2]);
	}
}
