package wikiextract.nlp.trainingset.x;

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.zip.GZIPInputStream;

import utils.bin.sortvar.Segmenter;
import utils.bin.sortvar.SortComponentsFactory;
import utils.bin.sortvar.Sorter;
import wikiextract.data.util.Bytes;
import wikiextract.data.util.StringUtil;
import wikiextract.nlp.trainingset.x.Definitions.Dataset;
import wikiextract.nlp.trainingset.x.Definitions.Feature1;
import wikiextract.util.io.DelimitedReader;
import wikiextract.util.io.DelimitedWriter;
import wikiextract.util.io.FileOperations;
import edu.stanford.nlp.trees.Tree;

// Takes the examples file {sentenceId, match, attributesNamesId} (where
// match is possibly null), combines it with three other per-sentence inputs
// (tokenized sentences, parse trees, dependencies), and generates features.

public class RunFirstPass {
	
	// Base directories on the (hard-coded) cluster filesystem. Note that main()
	// currently builds absolute input/output paths from other hard-coded
	// prefixes and only uses tmpDir (for the ids and trainingset files).
	static String dataDir = "/projects/pardosa/s2/raphaelh/data/all";
	static String tmpDir = "/projects/pardosa/s2/raphaelh/tmp";
	
	// Per-sentence input file names (absolute paths are assembled in main()).
	static String input2 = "sentences.stanford.tokenized.cleaned.gz"; // tokenized, cleaned sentences
	static String input3 = "sentences.stanford.parsed.gz"; // serialized parse trees (currently unused)
	static String input4 = "sentenceDependencies"; // binary dependency records
	
	// Output file names: binary feature records and the feature-id dictionary.
	static String output1 = "learn/ft"; // FeaturesRecord stream
	static String output2 = "learn/ftIds"; // feature name <-> id mapping
	
	// Number of neighboring tokens used for contextualization features.
	static int CONTEXT_WINDOW_PREV = 5;
	static int CONTEXT_WINDOW_NEXT = 5;

	/**
	 * First feature-generation pass over the training-set matches.
	 *
	 * Streams the matches file in parallel with the tokenized-sentence and
	 * dependency streams (a merge-join keyed on sentenceId), builds a list of
	 * feature ids per token, writes one binary FeaturesRecord per usable
	 * match, dumps the feature-name -&gt; id dictionary, and finally sorts the
	 * binary output by (attributesNamesId, articleId, sentenceId).
	 *
	 * NOTE(review): the merge-join assumes all input streams are sorted by
	 * ascending sentenceId — TODO confirm against the file producers.
	 */
	public static void main(String[] args) throws Exception {
		Settings.parse(args);
		// NOTE(review): hard-coded cluster paths; the commented-out lines are
		// earlier locations of the same files, kept for reference.
		//input2 = tmpDir + "/" + input2;
		//input3 = dataDir + "/" + input3;
		//input4 = dataDir + "/" + input4;
		//String input2p = "/projects/pardosa/data08/raphaelh/tmp/" + input2;
		//String input3p = "/projects/pardosa/data08/raphaelh/tmp/" + input3;
		//String input4p = "/projects/pardosa/data08/raphaelh/tmp/" + input4;
		//String input2p = "/projects/pardosa/s2/raphaelh/tmp/" + input2;
		//String input3p = "/projects/pardosa/s2/raphaelh/tmp/" + input3;
		//String input4p = "/projects/pardosa/s2/raphaelh/data/all/" + input4;
		String input2p = "/projects/db9/raphaelh/nobackup/data/all/" + input2;
		String input3p = "/projects/db9/raphaelh/nobackup/data/all/" + input3;
		String input4p = "/projects/db9/raphaelh/nobackup/data/all/" + input4;
		
		//output1 = tmpDir + "/" + output1 + "_" + Settings.firstPassSuffix();
		//output2 = tmpDir + "/" + output2 + "_" + Settings.firstPassSuffix();
		//String output1p = "/projects/pardosa/data08/raphaelh/tmp/" + output1 + "_" + Settings.firstPassSuffix();
		//String output2p = "/projects/pardosa/data08/raphaelh/tmp/" + output2 + "_" + Settings.firstPassSuffix();
		//String output1p = "/projects/pardosa/s2/raphaelh/tmp/" + output1 + "_" + Settings.firstPassSuffix();
		//String output2p = "/projects/pardosa/s2/raphaelh/tmp/" + output2 + "_" + Settings.firstPassSuffix();
		String output1p = "/projects/db9/raphaelh/nobackup/tmp/" + output1 + "_" + Settings.firstPassSuffix();
		String output2p = "/projects/db9/raphaelh/nobackup/tmp/" + output2 + "_" + Settings.firstPassSuffix();
		
		System.out.println("processing " + Settings.idsFile);
		// Whitelist of attributesNamesId values: matches whose id is not in
		// this set are skipped entirely below.
		HashSet<Integer> anids = new HashSet<Integer>();
		//if (Settings.chunk >= -1) 
		{
			DelimitedReader r = new DelimitedReader(tmpDir + "/" + Settings.idsFile);
			String[] t = null;
			while ((t = r.read()) != null) anids.add(Integer.parseInt(t[0]));
			r.close(); 
		}
			
		
		// Open the parallel per-sentence input streams; the parse-tree stream
		// is currently disabled (see commented code).
		DataInputStream isMatches = new DataInputStream
			((new BufferedInputStream(new FileInputStream(tmpDir + "/" + Settings.trainingsetFile))));
		DelimitedReader isTokenized = new DelimitedReader(input2p, "utf-8", true);
		/*
		DataInputStream isParsed = new DataInputStream(new GZIPInputStream
				  (new BufferedInputStream(new FileInputStream(input3))));
		 */
		DataInputStream isDeps = new DataInputStream
		  (new BufferedInputStream(new FileInputStream(input4p)));
		  
		DataOutputStream osTrain = new DataOutputStream
			(new BufferedOutputStream(new FileOutputStream(output1p)));
		
		MatchesRecord im = new MatchesRecord();
		IteratorTokenized it = new IteratorTokenized();
		IteratorParsed ip = new IteratorParsed();
		IteratorDependencies id = new IteratorDependencies();
		FeaturesRecord out = new FeaturesRecord();
		
		// Prime the side streams with their first record.
		it.read(isTokenized);
		//ip.read(isParsed);
		id.read(isDeps);
		
		// The dependency stream only needs to be advanced when at least one
		// dependency-based feature is enabled.
		boolean computeDeps = 
			Settings.useFeature1(Feature1.DEPENDENCIES) ||
			Settings.useFeature1(Feature1.NEWDEPS) ||
			Settings.useFeature1(Feature1.NEWDEPS2) ||
			Settings.useFeature1(Feature1.NEWDEPS3)
			;
		
		// Feature name -> dense id, built incrementally by getFeatureId.
		HashMap<String, Integer> hsFeatures = new HashMap<String, Integer>();		
		int lastPrintedSentenceId =-1;
		while (im.read(isMatches)) {
			// Progress output every 100k sentence ids.
			if (im.sentenceId % 100000 == 0 && im.sentenceId != lastPrintedSentenceId) {
				lastPrintedSentenceId = im.sentenceId;
				System.out.println(im.sentenceId);
			}
			if (!anids.contains(im.attributesNamesId)) continue;
			
			// position tokenized: advance the side streams to the current
			// match's sentence (merge-join on sentenceId)
			while (it.sentenceId < im.sentenceId) it.read(isTokenized);
			//while (ip.sentenceId < im.sentenceId) ip.read(isParsed);
			if (computeDeps)
				while (id.sentenceId < im.sentenceId) id.read(isDeps);
			
			out.attributesNamesId = im.attributesNamesId;
			out.articleId = im.articleId;
			out.sentenceId = im.sentenceId;
			out.useForLearning = true;
			out.features = new List[it.tokenPos.length];
			
			// Recover the token strings from the sentence via their char spans.
			String[] tokens = new String[it.tokenPos.length];
			for (int i=0; i < it.tokenPos.length; i++)
				tokens[i] = it.sentence.substring(it.tokenPos[i][0], it.tokenPos[i][1]).replace(" ", "");
			out.tokens = tokens;
			// check if tokens are ok: necessary because of bug in data
			boolean tokensOk = true;
			for (int ti = 0; ti < tokens.length; ti++) {				
				if (tokens[ti].trim().length() == 0) {
					tokensOk = false;
					/*
					System.out.println("ARGHH: token length 0, " + tokens.length +"\n" + it.sentence);
					for (int i=0; i < tokens.length; i++)
						System.out.println("  " + tokens[i]);
					System.out.println(im.sentenceId);
					System.out.println(im.articleId);
					System.exit(1);
					*/
				}
				if (tokens[ti].indexOf(' ') >= 0) {
					tokensOk = false;
					/*
					System.out.println("ARGHH: token contains space '" + tokens[ti] + "'");
					System.out.println(it.sentence);
					*/
				}
			}
			// Silently skip sentences with malformed tokens (known data bug:
			// empty tokens or tokens containing spaces).
			if (!tokensOk) continue;
			
			out.matchPos = im.matchPos;
			out.matchLabels = im.matchLabels;
			
			// Build the feature-id list for every token of the sentence.
			for (int i=0; i < it.tokenPos.length; i++) {
				//sbData.append(tokens[i] + " " + labels[i]);
				
				List<Integer> hs = out.features[i] = new ArrayList<Integer>();
				
				// the token's surface form itself
				if (Settings.useFeature1(Feature1.WORDS))
					hs.add(getFeatureId(tokens[i]/*.toLowerCase()*/, hsFeatures));
				
				// surrounding tokens, suffixed _P (previous) / _S (subsequent)
				if (Settings.useFeature1(Feature1.CONTEXTUALIZATION)) {
					for (int j = i - 1; j >= Math.max(0, i - CONTEXT_WINDOW_PREV); j--) {
						hs.add(getFeatureId(tokens[j]/*.toLowerCase()*/ + "_P", hsFeatures));				
					}
					for (int j = i + 1; j <= Math.min(tokens.length-1, i + CONTEXT_WINDOW_NEXT); j++) {
						hs.add(getFeatureId(tokens[j]/*.toLowerCase()*/ + "_S", hsFeatures));
					}
				}
				
				if (Settings.useFeature1(Feature1.CAPITALIZATION))
					if (Character.isUpperCase(tokens[i].charAt(0))) {
						hs.add(getFeatureId("InitCap", hsFeatures));
					}
				
				if (Settings.useFeature1(Feature1.DIGITS)) {
					boolean allDigit = true;
					for (int j=0; j < tokens[i].length(); j++)
						if (!Character.isDigit(tokens[i].charAt(j))) {
							allDigit = false;
							break; 
						}
					if (allDigit) hs.add(getFeatureId("AllDigit", hsFeatures));
				}

				// Dependency-based features; see the getDependencyAttribute*
				// helpers for the exact feature strings generated.
				if (Settings.useFeature1(Feature1.DEPENDENCIES)) {
					String depA = getDependencyAttribute(id.deps, i);
					hs.add(getFeatureId(depA, hsFeatures));
				}
				
				if (Settings.useFeature1(Feature1.NEWDEPS)) {
					List<String> l = getDependencyAttributes(id.deps, i);
					for (String a : l)
						hs.add(getFeatureId(a, hsFeatures));
				}

				if (Settings.useFeature1(Feature1.NEWDEPS2)) {
					List<String> l = getDependencyAttributes2(id.deps, i);
					for (String a : l)
						hs.add(getFeatureId(a, hsFeatures));
				}

				if (Settings.useFeature1(Feature1.NEWDEPS3)) {
					String depA = getDependencyAttribute3(id.deps, i);
					hs.add(getFeatureId(depA, hsFeatures));
				}

			}
			out.write(osTrain);
		}
		isTokenized.close();
		//isParsed.close();
		isDeps.close();
	    isMatches.close();
	    osTrain.close();
	    
	    // write features: one line per feature, sorted by id so that the
	    // dictionary's ordering matches the dense id space
	    DelimitedWriter w = new DelimitedWriter(output2p);
	    List<Map.Entry<String,Integer>> fl = new ArrayList<Map.Entry<String,Integer>>(hsFeatures.size());
	    fl.addAll(hsFeatures.entrySet());
	    // ids are small non-negative ints assigned from the map size, so the
	    // subtraction-based comparator cannot overflow here
	    Collections.sort(fl, new Comparator<Map.Entry<String,Integer>>() { 
	    	public int compare(Map.Entry<String,Integer> a, Map.Entry<String,Integer> b) { return a.getValue() - b.getValue(); } });	    
	    for (Map.Entry<String,Integer> e : fl)
	    	w.write(e.getValue() + "", e.getKey());
	    w.close();
	    
	    // sort by attributesNamesId, articleId, sentenceId
    	Sorter sorter = new utils.bin.sortvar.Sorter();
    	sorter.sort(output1p, new SortComponentsFactory() {

			// Compares two serialized records by their three leading ints.
			public utils.bin.sortvar.Comparator createComparator() {
				return new utils.bin.sortvar.Comparator() {
					int anid1, anid2, articleId1, articleId2,
						sentenceId1, sentenceId2;
					
					// NOTE(review): subtraction-based compare assumes the ids
					// stay non-negative — confirm upstream id assignment.
					public int compare() {
						if (anid1 != anid2) return anid1 - anid2;
						if (articleId1 != articleId2)
							return articleId1 - articleId2;
						return sentenceId1 - sentenceId2;
					}

					public void setArg1(byte[] b1, int p1) {
						anid1 = Bytes.bytes2Int(b1, p1);
						articleId1 = Bytes.bytes2Int(b1, p1 + 4);
						sentenceId1 = Bytes.bytes2Int(b1, p1 + 8);
					}

					public void setArg2(byte[] b2, int p2) {
						anid2 = Bytes.bytes2Int(b2, p2);
						articleId2 = Bytes.bytes2Int(b2, p2 + 4);
						sentenceId2 = Bytes.bytes2Int(b2, p2 + 8);
					}
				};
			}

			// Computes the byte length of one serialized record so the sorter
			// can segment the file. NOTE(review): this hand-decoded layout
			// must mirror FeaturesRecord.write exactly — keep the two in sync.
			public Segmenter createSegmenter() {
				
				return new Segmenter() {
					public int recordLength(byte[] b, int p) {
						int l = 0;
						l += 4; // anid
						l += 4; // articleId
						l += 4; // sentenceId
						l += 1; // useForTraining
						// tokens.length
						int len = Bytes.bytes2Int(b, p + l);
						l += 4;
						// tokens: read unsigned short for length
						// (the 2-byte prefix written by DataOutputStream.writeUTF)
						int utfLen = (((b[p+l] & 0xff) << 8) | (b[p+l+1] & 0xff));
						l += 2 + utfLen;
						// matchPos
						int lenMatches = b[p+l];
						l += 1; // num						
						l += 3*lenMatches;
						
						// features: per token an int count followed by that
						// many 4-byte feature ids
						for (int i = 0; i < len; i++) {
							int fl = Bytes.bytes2Int(b, p + l);
							l += 4;
							l += fl*4;
						}
						return l;
					}					
				};
			}
    	});
    	
    	// replace the unsorted output with the sorted version
    	FileOperations.remove(output1p);
    	FileOperations.move(output1p + ".sorted", output1p);
	}
	
	/**
	 * Builds a single feature string for token {@code i}: the underscore-joined
	 * lemmas of all governors on the path from the token up to the root of the
	 * dependency graph (e.g. {@code "gov1_gov2_"}). Returns the empty string
	 * when the token has no governor.
	 *
	 * Cleanup vs. original: removed the unused {@code lastDep} local and the
	 * dead commented-out code; chained appends avoid intermediate strings.
	 *
	 * @param id dependency edges of the current sentence
	 * @param i  token index whose governor chain is collected
	 * @return concatenated governor lemmas, each followed by '_'
	 */
	private static String getDependencyAttribute(Dep[] id, int i) {
		// index: dependent token -> the edge that governs it
		HashMap<Integer,Dep> dep2Rel = new HashMap<Integer,Dep>();
		for (Dep d : id) {
			dep2Rel.put(d.dep_index, d);
		}
		
		// walk up the chain to the root, appending each governor's lemma
		// NOTE(review): assumes the dependency graph is acyclic — a cycle
		// would loop forever here; verify the upstream parser guarantees this
		StringBuilder sb = new StringBuilder();
		Dep rel = dep2Rel.get(i);
		while (rel != null) {
			sb.append(rel.gov_lemma).append('_');
			rel = dep2Rel.get(rel.gov_index);
		}
		return sb.toString();
	}
	
	/**
	 * Returns the grammatical relation label of the edge governing token
	 * {@code i}, or the sentinel {@code "_ROOT_"} when the token has no
	 * governor (it is the root of the dependency graph).
	 *
	 * Bug fix: the original dereferenced {@code dep2Rel.get(i)} unconditionally
	 * and threw a NullPointerException for root tokens; the sibling helpers
	 * (getDependencyAttribute, getDependencyAttributes) all guard this case.
	 */
	private static String getDependencyAttribute3(Dep[] id, int i) {
		// index: dependent token -> the edge that governs it
		HashMap<Integer,Dep> dep2Rel = new HashMap<Integer,Dep>();
		for (Dep d : id) {
			dep2Rel.put(d.dep_index, d);
		}
		
		Dep d = dep2Rel.get(i);
		// "_ROOT_" follows the convention visible in commented-out code nearby
		return d == null ? "_ROOT_" : d.relation;
	}

	
	/**
	 * Builds cumulative governor-path features for token {@code i}: the k-th
	 * returned string is the concatenation
	 * {@code "gov1_rel1_gov2_rel2_..._govk_relk_"} of the first k steps of the
	 * chain from the token to the root, so deeper ancestors yield longer, more
	 * specific features. Returns an empty list for a root token.
	 *
	 * Cleanup vs. original: removed the unused {@code lastDep} local and dead
	 * commented-out code; chained appends avoid intermediate strings.
	 *
	 * @param id dependency edges of the current sentence
	 * @param i  token index whose ancestor-path features are collected
	 */
	private static List<String> getDependencyAttributes(Dep[] id, int i) {
		List<String> fts = new ArrayList<String>();
		
		// index: dependent token -> the edge that governs it
		HashMap<Integer,Dep> dep2Rel = new HashMap<Integer,Dep>();
		for (Dep d : id) {
			dep2Rel.put(d.dep_index, d);
		}
		
		// walk up to the root; the growing prefix itself is the feature
		// NOTE(review): assumes the dependency graph is acyclic
		StringBuilder sb = new StringBuilder();
		Dep rel = dep2Rel.get(i);
		while (rel != null) {
			sb.append(rel.gov_lemma).append('_').append(rel.relation).append('_');
			fts.add(sb.toString());
			rel = dep2Rel.get(rel.gov_index);
		}
		return fts;
	}
	
	/**
	 * Builds neighborhood features for token {@code i} from the dependency
	 * graph:
	 * <ul>
	 *   <li>one {@code "_DEP_<lemma>"} feature per direct dependent of the token</li>
	 *   <li>one {@code "_ANC_<lemma>"} feature per ancestor on the governor
	 *       chain up to the root</li>
	 * </ul>
	 * Returns an empty list when the token has neither dependents nor a governor.
	 *
	 * Cleanup vs. original: removed the unused {@code lastDep} local and the
	 * dead commented-out code.
	 *
	 * @param id dependency edges of the current sentence
	 * @param i  token index whose neighborhood features are collected
	 */
	private static List<String> getDependencyAttributes2(Dep[] id, int i) {
		List<String> fts = new ArrayList<String>();
		
		// dependent token -> governing edge (for walking up to the root)
		HashMap<Integer,Dep> dep2Rel = new HashMap<Integer,Dep>();
		for (Dep d : id) {
			dep2Rel.put(d.dep_index, d);
		}
		// governor token -> outgoing edges (for listing direct dependents)
		HashMap<Integer,List<Dep>> gov2Rel = new HashMap<Integer,List<Dep>>();
		for (Dep d : id) {
			List<Dep> l = gov2Rel.get(d.gov_index);
			if (l == null) {
				l = new ArrayList<Dep>();
				gov2Rel.put(d.gov_index, l);
			}
			l.add(d);
		}

		// direct dependents become features
		List<Dep> dl = gov2Rel.get(i);
		if (dl != null) {
			for (Dep d : dl)
				fts.add("_DEP_" + d.dep_lemma);
		}
		
		// ancestors become features
		// NOTE(review): assumes the dependency graph is acyclic
		Dep rel = dep2Rel.get(i);
		while (rel != null) {
			fts.add("_ANC_" + rel.gov_lemma);
			rel = dep2Rel.get(rel.gov_index);
		}
		return fts;
	}

	
	/**
	 * Interns a feature string: returns its existing id, or assigns the next
	 * dense id (the current map size) and records it. Ids are therefore
	 * consecutive from 0 in first-seen order.
	 *
	 * @param feature  the feature string to look up or register
	 * @param features the name -> id dictionary, mutated on first sight
	 * @return the dense id of {@code feature}
	 */
	private static int getFeatureId(String feature, HashMap<String, Integer> features) {
		Integer id = features.get(feature);
		if (id == null) {
			id = Integer.valueOf(features.size());
			features.put(feature, id);
		}
		return id.intValue();
	}
	
	
	// One record of the tokenized-sentences file: ids, the raw sentence text,
	// its tokens, and the character span of every token within the sentence.
	public static class IteratorTokenized {
		public int sentenceId;
		public int sectionId;
		public int articleId;
		public String sentence;
		public String[] tokens;
		public int[][] tokenPos;
		public boolean EOF = false;
		
		// column layout of the delimited input file
		public static final int cs_sentenceId               = 0;
		public static final int cs_sectionId                = 1;
		public static final int cs_articleId                = 2;
		public static final int cs_sentence                 = 3;
		public static final int cs_tokens                   = 4;
		public static final int cs_tokenPos                 = 5;
		
		/**
		 * Reads the next record from {@code r} into this object's fields.
		 *
		 * @return true if a record was read, false at end of input (EOF is set)
		 */
		public boolean read(DelimitedReader r) throws IOException {
			String[] fields = r.read();
			if (fields == null) {
				EOF = true;
				return false;
			}
			sentenceId = Integer.parseInt(fields[cs_sentenceId]);
			sectionId = Integer.parseInt(fields[cs_sectionId]);
			articleId = Integer.parseInt(fields[cs_articleId]);
			sentence = fields[cs_sentence];
			tokens = StringUtil.split(fields[cs_tokens], ' ', 512);
			// token positions arrive as space-separated "start:end" pairs
			String[] rawPositions = StringUtil.split(fields[cs_tokenPos], ' ', 512);
			tokenPos = new int[rawPositions.length][2];
			for (int k = 0; k < rawPositions.length; k++) {
				String[] span = StringUtil.split(rawPositions[k], ':', 2);
				tokenPos[k][0] = Integer.parseInt(span[0]);
				tokenPos[k][1] = Integer.parseInt(span[1]);
			}
			return true;
		}
	}
	
	// Iterates over the binary stream of serialized Stanford parse trees.
	// Record layout: int sentenceId, boolean parseSuccessful, int length,
	// followed by `length` bytes holding one Java-serialized Tree.
	// Currently unused in main() (the parsed-stream reads are commented out).
	static class IteratorParsed {
		public int sentenceId;       // sentence this tree belongs to
		public boolean parseSuccessful;
		public int length;           // size in bytes of the serialized tree
		public byte[] data;          // raw serialized bytes of the tree
		public Tree tree;            // deserialized parse tree
		public boolean EOF;
		
		// Reads the next record; returns false and sets EOF at end of stream.
		// SECURITY NOTE(review): Java-native deserialization — only safe on
		// trusted files. A ClassNotFoundException from readObject propagates
		// (hence `throws Exception`).
		public boolean read(DataInput di) throws Exception {
			try {
				sentenceId = di.readInt();
				parseSuccessful = di.readBoolean();
				length = di.readInt();
				data = new byte[length];
				di.readFully(data);
				ByteArrayInputStream bais = new ByteArrayInputStream(data);
				ObjectInputStream ois = new ObjectInputStream(bais);
				Serializable o = (Serializable)ois.readObject();
				ois.close(); // in-memory stream, so skipping close on error is harmless
				tree = (Tree)o;
				return true;
			} catch (EOFException e) { 
				EOF = true;
				return false; 
			} 
		}
	}
	
	// Iterates over the binary sentenceDependencies stream. Record layout:
	// int sentenceId, boolean parseSuccessful, int numDeps, then numDeps
	// edges each written as UTF/int/UTF/UTF/int/UTF/UTF (see readEdge).
	static class IteratorDependencies {
		public int sentenceId;
		public boolean parseSuccessful;
		public int numDeps;
		public Dep[] deps;
		public boolean EOF = false;
		
		/**
		 * Reads the next sentence's dependency record into this object.
		 *
		 * @return true if a record was read, false at end of stream (EOF is set)
		 */
		public boolean read(DataInput di) throws IOException {
			try {
				sentenceId = di.readInt();
				parseSuccessful = di.readBoolean();
				numDeps = di.readInt();
				deps = new Dep[numDeps];
				for (int k = 0; k < numDeps; k++)
					deps[k] = readEdge(di);
				return true;
			} catch (EOFException endOfStream) {
				EOF = true;
				return false; 
			}
		}
		
		// Reads a single dependency edge in its on-disk field order.
		private Dep readEdge(DataInput di) throws IOException {
			Dep edge = new Dep();
			edge.relation = di.readUTF();
			edge.gov_index = di.readInt();
			edge.gov_value = di.readUTF();
			edge.gov_lemma = di.readUTF();
			edge.dep_index = di.readInt();
			edge.dep_value = di.readUTF();
			edge.dep_lemma = di.readUTF();
			return edge;
		}
	}
	
	// One typed dependency edge (governor -> dependent) as read from the
	// sentenceDependencies stream; indices are token positions in the sentence
	// (they are matched against token loop indices in main()).
	static class Dep {
		public String relation;   // grammatical relation label of the edge
		public  int gov_index;    // token index of the governor
		public String gov_value;  // surface form of the governor
		public String gov_lemma;  // lemma of the governor
		public int dep_index;     // token index of the dependent
		public String dep_value;  // surface form of the dependent
		public String dep_lemma;  // lemma of the dependent
	}
}
