package wikiextract.nlp.trainingset.x;

import static wikiextract.nlp.trainingset.x.SecondPassLexicons.createIndexes;
import static wikiextract.nlp.trainingset.x.SecondPassLexicons.lexiconFeatures;
import static wikiextract.nlp.trainingset.x.SecondPassLexicons.seeds;
import static wikiextract.nlp.trainingset.x.SecondPassLexicons.weblists;
import static wikiextract.nlp.trainingset.x.SecondPassNumdists.createNumdistIndexes;
import static wikiextract.nlp.trainingset.x.SecondPassNumdists.gaussians;
import static wikiextract.nlp.trainingset.x.SecondPassNumdists.numdistFeatures;
import static wikiextract.nlp.trainingset.x.Settings.MAX_SENTENCES_PER_ARTICLE;
import static wikiextract.nlp.trainingset.x.Settings.NUM_CROSS_LEXICONS;
import static wikiextract.nlp.trainingset.x.Settings.evaluationPoints;
import static wikiextract.nlp.trainingset.x.Settings.features2ToCompute;
import static wikiextract.nlp.trainingset.x.Settings.repetitions;

import java.io.BufferedInputStream;
import java.io.BufferedWriter;
import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;

import learning.data.SequenceDataReader;
import wikiextract.nlp.trainingset.x.Definitions.Feature2;
import wikiextract.nlp.trainingset.x.Definitions.ListType;
import wikiextract.nlp.trainingset.x.SecondPassLexicons.Lexicon;
import wikiextract.nlp.trainingset.x.SecondPassLexicons.LexiconFeature;
import wikiextract.nlp.trainingset.x.SecondPassNumdists.Numdist;
import wikiextract.nlp.trainingset.x.SecondPassNumdists.NumdistFeature;
import wikiextract.util.io.DelimitedReader;
//import static wikiextract.nlp.trainingset.x.Settings.SECONDPASS_RANDOMIZATION_ONLY;

/**
 * Second pass of the training-set learner: streams the per-sentence feature
 * records produced by the first pass, regroups them by attribute set and
 * article, repeatedly splits each group into train/test samples, retrains a
 * CRF per feature mode, and appends averaged F1/precision/recall rows to the
 * results file.
 */
public class RunSecondPass {
	
	// Cluster data/scratch locations; earlier scratch paths kept for reference.
	static String dataDir = "/projects/pardosa/s2/raphaelh/data/all";
	//static String tmpDir = "/projects/pardosa/s2/raphaelh/tmp";
	//static String tmpDir = "/projects/pardosa/data08/raphaelh/tmp";
	static String tmpDir = "/projects/db9/raphaelh/nobackup/tmp";

	// Relative file names; main() expands them to absolute, suffixed paths.
	static String input1 = "learn/ft";        // binary FeaturesRecord stream
	static String input2 = "learn/ftIds";     // delimited feature-id table
	static String output1 = "learn/results";  // tab-separated result rows
	
	// Feature id -> name table loaded from input2 (position == id).
	static List<String> featureIds;	
	// Reserved map for second-pass feature ids (see getSecondFeatureId).
	static Map<String,Integer> secondFeatureIds;
	
	// Seeded RNG so sampling/shuffling is reproducible across runs.
	static Random random;
	// Open results writer; processSamples appends and flushes per row.
	static BufferedWriter bw;
	
	/**
	 * Entry point: resolves input/output paths from Settings, streams the
	 * first-pass FeaturesRecord file, groups consecutive records into
	 * articles (by articleId) and articles into batches (by
	 * attributesNamesId), and evaluates each batch via processSamples.
	 *
	 * @param args forwarded to Settings.parse
	 * @throws Exception on I/O or settings errors
	 */
	public static void main(String[] args) throws Exception {
		Settings.parse(args);
		input1 = tmpDir + "/" + input1 + "_" + Settings.firstPassSuffix();
		input2 = tmpDir + "/" + input2 + "_" + Settings.firstPassSuffix();
		output1 = tmpDir + "/" + output1 + "_" + Settings.firstPassSuffix() + "_" + 
			Settings.secondPassSuffix();
		
		// Fixed seed so repeated runs draw identical train/test splits.
		random = new Random(Settings.RANDOM_SEED);

		SequenceDataReader.learnTransitionFeatures = 
			Settings.useFeature1(Definitions.Feature1.TRANSITIONS);
		
		readFeatureIds(input2);
		
		bw = new BufferedWriter(new FileWriter(output1));
		DataInputStream is = new DataInputStream(new BufferedInputStream
				(new FileInputStream(input1)));
		FeaturesRecord io = new FeaturesRecord();
		
		// data: consecutive articles sharing one attributesNamesId;
		// curArticle: consecutive records sharing one articleId.
		List<List<FeaturesRecord>> data = new ArrayList<List<FeaturesRecord>>();
		List<FeaturesRecord> curArticle = new ArrayList<FeaturesRecord>();

		io.read(is);
		while (!io.EOF) {
			if (curArticle.isEmpty() || curArticle.get(0).articleId == io.articleId) {
				// Same article: keep at most MAX_SENTENCES_PER_ARTICLE sentences.
				if (curArticle.size() < MAX_SENTENCES_PER_ARTICLE) 
					curArticle.add(io.clone());
			} else {
				// Article boundary: file the finished article, start a new one.
				addArticle(data, curArticle);
				curArticle = new ArrayList<FeaturesRecord>();
				curArticle.add(io.clone());
			}
			io.read(is);
		}
		// BUGFIX: the last article used to be dropped at EOF because only
		// `data` was flushed; file the trailing curArticle as well.
		if (!curArticle.isEmpty()) addArticle(data, curArticle);
		if (!data.isEmpty()) processSamples(data.get(0).get(0).attributesNamesId, data);
		is.close();
		bw.close();
	}
	
	/**
	 * Files a completed article into {@code data}. When the article belongs
	 * to a different attribute set than the batch collected so far, the
	 * batch is processed and cleared first, so {@code data} always holds
	 * articles of a single attributesNamesId.
	 */
	private static void addArticle(List<List<FeaturesRecord>> data,
			List<FeaturesRecord> curArticle) throws IOException {
		if (data.isEmpty() || curArticle.get(0).attributesNamesId == 
				data.get(0).get(0).attributesNamesId) {
			data.add(curArticle);
		} else {
			processSamples(data.get(0).get(0).attributesNamesId, data);
			data.clear();
			data.add(curArticle);
		}
	}
	
	/** True iff at least one sentence in the article carries a match annotation. */
	private static boolean hasAnnotation(List<FeaturesRecord> snts) {
		for (FeaturesRecord record : snts) {
			if (record.matchPos.length > 0) {
				return true;
			}
		}
		return false;
	}
	
	/**
	 * Loads the feature id table written by the first pass: column 1 of each
	 * delimited row is the feature name, the row position its numeric id.
	 * Also resets the (currently unused) second-pass feature-id map.
	 *
	 * @param file path to the delimited ftIds file
	 * @throws IOException if the file cannot be read
	 */
	private static void readFeatureIds(String file) throws IOException {
		featureIds = new ArrayList<String>();
		secondFeatureIds = new HashMap<String,Integer>();
		DelimitedReader r = new DelimitedReader(file);
		try {
			String[] t;
			while ((t = r.read()) != null) {
				featureIds.add(t[1]);
			}
		} finally {
			// close even when a read throws, so the file handle is not leaked
			r.close();
		}
	}
	
	/**
	 * Returns the numeric id of {@code feature}, assigning the next free id
	 * (the current size of {@code features}) on first sight. Keeps list and
	 * map in sync: {@code features.get(id)} is always the feature string.
	 */
	static int getSecondFeatureId(String feature, 
			Map<String, Integer> featuresMap, List<String> features) {
		Integer existing = featuresMap.get(feature);
		if (existing != null) {
			return existing;
		}
		int assigned = features.size();
		features.add(feature);
		featuresMap.put(feature, assigned);
		return assigned;
	}	
		
	/**
	 * Evaluates one attribute set: for each evaluation point (training-set
	 * size) draws repetitions[e] random train/test splits of the articles,
	 * trains a CRF for every mode in features2ToCompute, and appends one
	 * tab-separated row of averaged F1/precision/recall per (point, mode)
	 * to the results writer.
	 *
	 * @param attributesNamesId id of the attribute set all articles share
	 * @param data one inner list per article, each a list of sentence records;
	 *             shuffled in place
	 * @throws IOException if writing a result row fails
	 */
	private static void processSamples(int attributesNamesId, List<List<FeaturesRecord>> data) 
		throws IOException {

		// for now: keep only articles for which we have matches
		for (Iterator<List<FeaturesRecord>> i = data.iterator(); i.hasNext(); ) {
			List<FeaturesRecord> art = i.next();
			boolean hasLabel = false;
			for (FeaturesRecord o : art)
				if (!hasLabel) 
					if (o.matchPos.length > 0) { hasLabel = true;  }
			if (!hasLabel) i.remove();
		}

		// data.size() should be at least ep + [0.2*ep,
		
		// NOTE(review): this check is currently a no-op — both branches hold
		// only commented-out diagnostics, so small attribute sets are NOT
		// skipped. Re-enable the "return" below to restore the filtering.
		int minTestSize = Math.max((int)(0.25*evaluationPoints[0]), 1);
		if (evaluationPoints[0] != -1 && data.size() < evaluationPoints[0] + minTestSize) {
			//System.out.println("  too few articles: " + data.size() + " articles");
			//return;
		} else {
			//System.out.println("processing " + attributesNamesId + ": " + data.size() + " articles");
		}
		
		for (int e = 0; e < evaluationPoints.length; e++) {
			int ep = evaluationPoints[e];

			// make sure we use same samples for all modes, so keep them extra
			List<List<List<FeaturesRecord>>> trainSamples = new ArrayList<List<List<FeaturesRecord>>>();
			List<List<List<FeaturesRecord>>> testSamples = new ArrayList<List<List<FeaturesRecord>>>();
			for (int r = 0; r < repetitions[e]; r++) {
				//if (SECONDPASS_RANDOMIZATION_ONLY) {
				//	PersistentRandomization.append(data);
				//	continue;
				//}
				//PersistentRandomization.shuffle(data);
				// sample ep data points
				Collections.shuffle(data, random);
				//int size = Math.min(ep, data.size()-1);
				//int trainSize = (int)(data.size()*4.0/5.0);
				// ep == -1 means "train on 4/5 of the data"; otherwise train on
				// exactly ep articles, falling back to the 4/5 split whenever
				// fewer than 2 articles would remain for testing.
				int trainSize = evaluationPoints[e] == -1?
						(int)(data.size()*4.0/5.0) : evaluationPoints[e];
				if (data.size() - trainSize < 2) trainSize = (int)(data.size()*4.0/5.0);
						
				List<List<FeaturesRecord>> selected = new ArrayList<List<FeaturesRecord>>(trainSize);
				selected.addAll(data.subList(0, trainSize));
				//int otherSize = data.size() - size;
				int otherSize = data.size() - trainSize;
				List<List<FeaturesRecord>> notSelected = new ArrayList<List<FeaturesRecord>>(otherSize);
				notSelected.addAll(data.subList(trainSize, trainSize + otherSize));
				
				// test set is 1/5, rest is training
				
				
				trainSamples.add(selected);
				testSamples.add(notSelected);
			}

			for (int m = 0; m < features2ToCompute.length; m++) {
				Avg f1 = new Avg();
				Avg pr = new Avg();
				Avg re = new Avg();
				// average the metrics of every repetition for this mode
				for (int r = 0; r < repetitions[e]; r++) {
					double[] results = process(trainSamples.get(r),
							testSamples.get(r),
							features2ToCompute[m]);
					f1.add(results[0]);
					pr.add(results[1]);
					re.add(results[2]);
				}
				// row: anid, #articles, eval point, mode, F1, precision, recall
				String resultStr = attributesNamesId + "\t" + data.size() + "\t" + ep + "\t" +
				features2ToCompute[m].name()+ "\t" +  f1.avg() + "\t" + pr.avg() + "\t" + re.avg();
				
				bw.append(resultStr + "\n");
				bw.flush();
				System.out.println(resultStr);
			}
		}		
	}
	
	/**
	 * Runs one train/evaluate round for a single feature mode. Depending on
	 * {@code mode}, augments the first-pass features with seed lexicons
	 * (harvested from training matches), web-expanded lexicons, and/or
	 * gaussian numeric-distribution features, then trains and scores the CRF.
	 *
	 * @return the cross-validation metrics from createFeaturesSecondPass
	 */
	private static double[] process(List<List<FeaturesRecord>> trainSamples,
			List<List<FeaturesRecord>> testSamples, Feature2 mode) 
		throws IOException {

		List<LexiconFeature> lexicons = new ArrayList<LexiconFeature>();
		LexiconFeature firstSegmentLex = null;
		LexiconFeature lastSegmentLex = null;
		LexiconFeature firstTokenLex = null;

		// Seed lexicons are built from the training samples only; each seeds()
		// call appends exactly one LexiconFeature, which we remember so the
		// WEB mode can expand it below.
		if (mode == Feature2.ALLVALUES || mode == Feature2.WEB) {
			if (Settings.useListType(ListType.FIRST_SEGMENT)) {
				seeds(trainSamples, lexicons, "_seedlistFirst", 1, 0);
				firstSegmentLex = lexicons.get(lexicons.size() - 1);
			}
			if (Settings.useListType(ListType.LAST_SEGMENT)) {
				seeds(trainSamples, lexicons, "_seedlistLast", 2, 0);
				lastSegmentLex = lexicons.get(lexicons.size() - 1);
			}
			if (Settings.useListType(ListType.FIRST_TOKEN)) {
				seeds(trainSamples, lexicons, "_seedlistFirstWord", 1, 1);
				firstTokenLex = lexicons.get(lexicons.size() - 1);
			}
		}

		// WEB mode additionally derives web-list lexicons from each seed list.
		if (mode == Feature2.WEB) {
			if (Settings.useListType(ListType.FIRST_SEGMENT))
				weblists(firstSegmentLex, lexicons, "_weblistFirst");
			if (Settings.useListType(ListType.LAST_SEGMENT))
				weblists(lastSegmentLex, lexicons, "_weblistLast");
			if (Settings.useListType(ListType.FIRST_TOKEN))
				weblists(firstTokenLex, lexicons, "_weblistFirstWord");
		}
		createIndexes(lexicons);

		// Gaussian numeric predictors are used in GAUSSIAN mode only.
		List<NumdistFeature> numdists = new ArrayList<NumdistFeature>();
		if (mode == Feature2.GAUSSIAN)
			gaussians(trainSamples, numdists, "_gaussianFirst", 1, 0);
		createNumdistIndexes(numdists);

		return createFeaturesSecondPass(trainSamples, testSamples, 
				lexicons, numdists);
	}
	
	/**
	 * Serializes train and test articles into the CRF trainer's textual input
	 * format (one token per line with 0/1 label and sorted feature ids, blank
	 * line between sentences), adding the lexicon/numdist features for this
	 * split on top of the copied first-pass features, then runs one
	 * cross-validation round.
	 *
	 * @return the trainer's cross-validation results (consumed as
	 *         {f1, precision, recall} by processSamples)
	 */
	private static double[] createFeaturesSecondPass(List<List<FeaturesRecord>> train,
			List<List<FeaturesRecord>> test, List<LexiconFeature> lexicons,
			List<NumdistFeature> numdists) 
		throws IOException {
		
		CRFTrainer ct = new CRFTrainer();
		
		boolean[] featuresUsed = new boolean[featureIds.size()];
		StringBuilder sbTrainData = new StringBuilder();
		StringBuilder sbTestData = new StringBuilder();

		for (int ar = 0; ar < train.size(); ar++) {
			//boolean useForTraining
			for (FeaturesRecord o : train.get(ar)) {
				if (!o.useForLearning) continue;
				sbTrainData.append("[ articleId:" + ar + " ]\n"); // import for scoring
				// copy so the cached first-pass features are never mutated
				List<Integer>[] secondPassFeatures = copy(o.features);
				// add lexicon features
				// NOTE(review): with LEXICON_CROSSTRAINING, article ar uses the
				// lexicon of chunk (ar % NUM_CROSS_LEXICONS) instead of the full
				// training lexicon — presumably to keep an article's own matches
				// out of its own features; confirm against SecondPassLexicons.
				for (LexiconFeature lf : lexicons) {
					Lexicon lex = !Settings.LEXICON_CROSSTRAINING? lf.lexicons.train :
						lf.lexicons.chunkTrain.get(ar % NUM_CROSS_LEXICONS);
					lexiconFeatures(o, false, lex.index, 
						secondPassFeatures, lf.featureId);
				}
				for (NumdistFeature nf : numdists) {
					Numdist nd = !Settings.LEXICON_CROSSTRAINING? nf.numdists.train :
						nf.numdists.chunkTrain.get(ar % NUM_CROSS_LEXICONS);
					numdistFeatures(o, false, nd.index, 
							secondPassFeatures, nf.featureId);
				}
				
				writeSecondPassFeatures(o, secondPassFeatures, sbTrainData,
						featuresUsed);
			}
		}
		
		// Test articles always use the full training lexicons/numdists.
		for (int ar = 0; ar < test.size(); ar++) {
			//boolean useForTraining
			for (FeaturesRecord o : test.get(ar)) {
				sbTestData.append("[ articleId:" + ar + " ]\n");				
				List<Integer>[] secondPassFeatures = copy(o.features);
				// add lexicon features
				for (LexiconFeature lf : lexicons)
					lexiconFeatures(o, false, lf.lexicons.train.index, 
						secondPassFeatures, lf.featureId);
				for (NumdistFeature nf : numdists)
					numdistFeatures(o, false, nf.numdists.train.index, 
							secondPassFeatures, nf.featureId);
				writeSecondPassFeatures(o, secondPassFeatures, sbTestData,
						featuresUsed);
			}
		}
		
		// Only feature ids actually emitted above go into the feature table.
		String usedFeatures = usedFeatures(featuresUsed);
		String lbls = "0\t0\n1\t1\n"; // two-class label file: 0 and 1
		
		Reader trData = new StringReader(sbTrainData.toString());
		Reader trFeatures = new StringReader(usedFeatures);
		Reader trLabels = new StringReader(lbls);
		
		Reader teData = new StringReader(sbTestData.toString());
		Reader teFeatures = new StringReader(usedFeatures);
		Reader teLabels = new StringReader(lbls);
		
		ct.addCV(trData, trLabels, trFeatures, teData, teLabels, teFeatures);
		/*
		String s = "_";// + i;			
		writeString(tmpDir + "/train" + s + ".data", sbTrainData.toString());
		writeFeatures(tmpDir + "/train" + s + ".fts", featuresUsed);
		writeString(tmpDir + "/train" + s + ".labels", lbls);

		writeString(tmpDir + "/test" + s + ".data", sbTestData.toString());
		writeFeatures(tmpDir + "/test" + s + ".fts", featuresUsed);
		writeString(tmpDir + "/test" + s + ".labels", lbls);

		WriteDebugFiles.main(null);
		if (1==1) System.exit(-1);

		/*
		writeLexicons(tmpDir + "/lexicon", i, lexicons);
		*/
		return ct.getCrossValidationResults();
	}

	/*
	private static void writeFeatures(String file, boolean[] featuresUsed) 
		throws IOException {
		DelimitedWriter w = new DelimitedWriter(file);
		for (int i=0; i < featureIds.size(); i++)
			if (featuresUsed[i])
				w.write(i + "", featureIds.get(i));
		w.close();
	}

	private static void writeString(String file, String data) throws IOException {
		BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(file), "utf-8"));
		bw.append(data);
		bw.close();
	}
	*/
	
	/**
	 * Appends one sentence to {@code sb} in CRF input format: one line per
	 * token ("token label id:1 id:2 ..."), terminated by a blank line.
	 * Tokens covered by any matchPos span get label 1, all others 0; every
	 * emitted feature id is flagged in {@code featuresUsed}. Sorts each
	 * per-token feature list in place.
	 */
	private static void writeSecondPassFeatures(FeaturesRecord o, List<Integer>[] fts, StringBuilder sb,
			boolean[] featuresUsed) {
		int tokenCount = o.tokens.length;
		int[] labels = new int[tokenCount];
		for (byte[] span : o.matchPos) {
			for (int pos = span[0]; pos < span[1]; pos++) {
				labels[pos] = 1;
			}
		}

		for (int t = 0; t < tokenCount; t++) {
			sb.append(o.tokens[t]).append(' ').append(labels[t]);
			Collections.sort(fts[t]);
			for (int featureId : fts[t]) {
				sb.append(' ').append(featureId).append(":1");
				featuresUsed[featureId] = true;
			}
			sb.append('\n');
		}
		sb.append('\n');
	}
	
	/** Renders the "id&lt;TAB&gt;name" table of all feature ids flagged as used. */
	private static String usedFeatures(boolean[] featuresUsed) {
		StringBuilder out = new StringBuilder();
		int total = featureIds.size();
		for (int id = 0; id < total; id++) {
			if (featuresUsed[id]) {
				out.append(id).append('\t').append(featureIds.get(id)).append('\n');
			}
		}
		return out.toString();
	}
		
	/** Deep-copies the per-token feature lists so second-pass features can be
	 *  appended without mutating the cached first-pass record. */
	@SuppressWarnings("unchecked")
	private static List<Integer>[] copy(List<Integer>[] fts) {
		int len = fts.length;
		List<Integer>[] copied = new List[len];
		for (int i = 0; i < len; i++) {
			copied[i] = new ArrayList<Integer>(fts[i]);
		}
		return copied;
	}

	/** Running arithmetic mean over the values passed to add(). */
	static class Avg {
		double sum = 0;
		double count = 0;
		
		/** Accumulates one observation. */
		void add(double d) {
			sum += d;
			count += 1;
		}
		
		/** Mean of all observations so far (NaN when nothing was added). */
		double avg() {
			return sum / count;
		}
	}	
}
