package wikiextract.nlp.trainingset.x;

import static wikiextract.nlp.trainingset.x.SecondPassLexicons.createIndexes;
import static wikiextract.nlp.trainingset.x.SecondPassLexicons.lexiconFeatures;
import static wikiextract.nlp.trainingset.x.SecondPassLexicons.seeds;
import static wikiextract.nlp.trainingset.x.SecondPassLexicons.weblists;
import static wikiextract.nlp.trainingset.x.SecondPassNumdists.createNumdistIndexes;
import static wikiextract.nlp.trainingset.x.SecondPassNumdists.gaussians;
import static wikiextract.nlp.trainingset.x.SecondPassNumdists.numdistFeatures;
import static wikiextract.nlp.trainingset.x.Settings.MAX_ARTICLES_PER_ATTRIBUTE;
import static wikiextract.nlp.trainingset.x.Settings.MAX_SENTENCES_PER_ARTICLE;
import static wikiextract.nlp.trainingset.x.Settings.NUM_CROSS_LEXICONS;
import static wikiextract.nlp.trainingset.x.Settings.features2ToCompute;

import java.io.BufferedInputStream;
import java.io.BufferedWriter;
import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Random;

import learning.data.SequenceDataReader;
import wikiextract.nlp.trainingset.x.Definitions.Feature2;
import wikiextract.nlp.trainingset.x.SecondPassLexicons.LexiconFeature;
import wikiextract.nlp.trainingset.x.SecondPassNumdists.NumdistFeature;
import wikiextract.util.io.DelimitedReader;
import wikiextract.util.io.*;

public class zclSecondPass {

	static String dataDir = "O:/unix/projects/pardosa/s2/raphaelh/data/all";
	// static String tmpDir = "/projects/pardosa/s2/raphaelh/tmp";
	// static String tmpDir = "/projects/pardosa/data07/raphaelh/tmp";
	// static String tmpDir =
	// "O:\\unix\\projects\\pardosa\\data08\\raphaelh\\tmp";
	// static String tmpDir = "/projects/pardosa/data10/raphaelh/tmp";
	// static String tmpDir =
	// "O:\\unix\\projects\\pardosa\\data10\\raphaelh\\tmp";
	// static String tmpDir = "/projects/pardosa/data11/raphaelh/tmp";
	// static String tmpDir = "/projects/pardosa/data12/raphaelh/tmp";
	// static String tmpDir = "/projects/db9/raphaelh/nobackup/tmp";
	static String tmpDir = "O:/unix/projects/pardosa/s2/clzhang/raphaelh/cluster_extractor/tmp";

	static String input1 = "learn/ft";
	static String input2 = "learn/ftIds";
	static String output1 = "learn/results";

	// static List<String> featureIds;
	// static Map<String,Integer> secondFeatureIds;

	static Random random;
	static BufferedWriter bw;

	static HashSet<Integer> trainIdsSet = new HashSet<Integer>();

	
	// static AttributeDataset trainData = null;
	// static List<AttributeDataset> testData = new
	// ArrayList<AttributeDataset>();

	/**
	 * A labelled group of articles for one attribute: the attribute's name
	 * plus one list of FeaturesRecords (sentences) per article.
	 */
	static class AttributeDataset {
		String name;
		List<List<FeaturesRecord>> data;

		public AttributeDataset(String name, List<List<FeaturesRecord>> data) {
			this.name = name;
			this.data = data;
		}

		/**
		 * Merges another dataset into this one: articles are appended and the
		 * names are joined with a '-' separator.
		 */
		public void add(AttributeDataset other) {
			this.name += "-" + other.name;
			this.data.addAll(other.data);
		}
	}

	/**
	 * Points this pass at a different data directory and temp directory,
	 * overwriting the class-level defaults.
	 */
	public static void setPath(String dataDir, String tmpDir) {
		zclSecondPass.tmpDir = tmpDir;
		zclSecondPass.dataDir = dataDir;
	}

	/**
	 * Trains a CRF on the attributes listed in train_id and evaluates it
	 * against the attributes listed in test_id, appending one result line per
	 * test dataset to a file named {@code outputfileprefix-<id>-<id>...}.
	 *
	 * Attributes appearing in BOTH arrays are split: at least 5 articles (or
	 * Settings.cv_test_train_ratio of them, whichever is larger) are held out
	 * for testing and only the remainder is used for training.
	 *
	 * @param train_id attribute ids whose articles provide training data
	 * @param test_id attribute ids to evaluate against
	 * @param maxArticle cap on the number of merged training articles
	 * @param outputfileprefix prefix of the results file; the train ids are
	 *            appended to form the final file name
	 * @throws Exception on I/O failure or malformed input data
	 */
	public static void clusterTrain(int[] train_id, int[] test_id, int maxArticle, String outputfileprefix) throws Exception {
		input1 = "learn/ft";
		input2 = "learn/ftIds";

		input1 = tmpDir + "/" + input1 + "_" + Settings.firstPassSuffix();
		input2 = tmpDir + "/" + input2 + "_" + Settings.firstPassSuffix();
		output1 = tmpDir + "/" + output1 + "_" + Settings.firstPassSuffix()
				+ "_" + Settings.secondPassSuffix();

		// fixed seed so repeated runs sample the same articles
		random = new Random(Settings.RANDOM_SEED);

		SequenceDataReader.learnTransitionFeatures = Settings
				.useFeature1(Definitions.Feature1.TRANSITIONS);
		HashSet<Integer> train_id_set = new HashSet<Integer>();
		HashSet<Integer> test_id_set = new HashSet<Integer>();
		String outputfile = outputfileprefix;

		for (int i : train_id) {
			train_id_set.add(i);
			outputfile += "-" + i;
		}

		for (int i : test_id) {
			test_id_set.add(i);
		}

		readFeatureIds(input2);

		bw = new BufferedWriter(new FileWriter(outputfile));

		AttributeDatasetIterator trainIt = new AttributeDatasetIterator(input1
				+ ".reduced");
		try {
			AttributeDataset trDs;
			AttributeDataset mergeTrain = null;
			List<AttributeDataset> testDataSet = new ArrayList<AttributeDataset>();
			while ((trDs = trainIt.next()) != null) {
				int raphid = Integer.parseInt(trDs.name);
				if (train_id_set.contains(raphid) && !test_id_set.contains(raphid)) {
					// train-only attribute: merge all of its articles
					if (mergeTrain == null) {
						mergeTrain = new AttributeDataset(trDs.name, trDs.data);
					} else {
						mergeTrain.add(trDs);
					}
				} else if (train_id_set.contains(raphid) && test_id_set.contains(raphid)) {
					// attribute used for both training and testing: hold out
					// a shuffled test split, train on the rest
					Collections.shuffle(trDs.data);
					List<List<FeaturesRecord>> split_data_for_train = new ArrayList<List<FeaturesRecord>>();
					List<List<FeaturesRecord>> split_data_for_test = new ArrayList<List<FeaturesRecord>>();
					int i = 0;
					int testcase_num = (int) Math.max(5, trDs.data.size() * Settings.cv_test_train_ratio);
					for (; i < testcase_num && i < trDs.data.size(); i++) {
						split_data_for_test.add(trDs.data.get(i));
					}
					for (; i < trDs.data.size(); i++) {
						split_data_for_train.add(trDs.data.get(i));
					}

					if (mergeTrain == null) {
						mergeTrain = new AttributeDataset(trDs.name, split_data_for_train);
					} else {
						// BUGFIX: previously merged the complete dataset
						// (trDs), leaking the held-out test articles into the
						// training data; only the training split may be added.
						mergeTrain.add(new AttributeDataset(trDs.name, split_data_for_train));
					}
					testDataSet.add(new AttributeDataset(trDs.name, split_data_for_test));
				} else if (!train_id_set.contains(raphid) && test_id_set.contains(raphid)) {
					// test-only attribute: evaluate on all of its articles
					testDataSet.add(new AttributeDataset(trDs.name, trDs.data));
				}
			}

			processSamples(mergeTrain, maxArticle, testDataSet);
			System.out.print(mergeTrain.name + " ");
			System.out.println(mergeTrain.data.size() + "\t" + testDataSet.size()
					+ "\t" + testDataSet.get(0).data.size());
		} finally {
			// BUGFIX: close the writer and the input stream even when
			// training/testing throws, instead of leaking them.
			bw.close();
			trainIt.close();
		}
	}
	/**
	 * Debug/reporting entry point. After Settings.parse it streams the reduced
	 * first-pass feature file and writes one "raphid <TAB> articleCount" pair
	 * per attribute to a hard-coded delimited file, echoing the same to
	 * stdout. The actual training call (processSamples) is commented out, so
	 * this currently only dumps dataset sizes.
	 */
	public static void main(String[] args) throws Exception {
		Settings.parse(args);
		// resolve the relative first-pass file names against tmpDir
		input1 = tmpDir + "/" + input1 + "_" + Settings.firstPassSuffix();
		input2 = tmpDir + "/" + input2 + "_" + Settings.firstPassSuffix();
		output1 = tmpDir + "/" + output1 + "_" + Settings.firstPassSuffix()
				+ "_" + Settings.secondPassSuffix();

		// /random = new Random(System.currentTimeMillis());
		// fixed seed for reproducible article sampling
		random = new Random(Settings.RANDOM_SEED);

		SequenceDataReader.learnTransitionFeatures = Settings
				.useFeature1(Definitions.Feature1.TRANSITIONS);

		// train: {1, 4, 5} <-- combine training data from attribs 1,4,5
		// test: {2, 9, 29} <-- separately test against 2, 9, 29

		// int CHUNK = 10;
		// int CHUNK_SIZE = 500;
		// CHUNK only feeds the output-file suffix below; CHUNK_SIZE is
		// currently unused in this method.
		int CHUNK = 1;
		int CHUNK_SIZE = 32;

		readFeatureIds(input2);

		bw = new BufferedWriter(new FileWriter(output1 + "_" + CHUNK));

		AttributeDatasetIterator trainIt = new AttributeDatasetIterator(input1
				+ ".reduced");
		AttributeDataset trDs;
		AttributeDataset mergeTrain = null;
		// NOTE(review): hard-coded absolute output path — consider deriving it
		// from tmpDir like the other files.
		DelimitedWriter dw = new DelimitedWriter("O:/unix/projects/pardosa/s2/clzhang/raphaelh/cluster_extractor/rphid_articlesize");
		while ((trDs = trainIt.next()) != null) {
			int raphid = Integer.parseInt(trDs.name);
			int size = trDs.data.size();
			dw.write(raphid+"",size+"");
			System.out.println(raphid+"\t"+size);
		}
		dw.close();
		//processSamples(mergeTrain,Settings.MAX_ARTICLES_PER_ATTRIBUTE);
		bw.close();
		trainIt.close();
	}

	/**
	 * Streams FeaturesRecords from a reduced first-pass feature file and
	 * groups them into one AttributeDataset per attributesNamesId. Records are
	 * grouped into articles by articleId; articles without any annotation are
	 * dropped, each article keeps at most MAX_SENTENCES_PER_ARTICLE sentences,
	 * and at most MAX_ARTICLES_PER_ATTRIBUTE articles are kept per attribute
	 * (overflow articles may randomly replace earlier ones).
	 */
	static class AttributeDatasetIterator {

		DataInputStream is = null;
		// Reusable record buffer: read() overwrites it in place, so any record
		// that is kept must be clone()d first.
		FeaturesRecord io = new FeaturesRecord();

		public AttributeDatasetIterator(String input) throws IOException {
			is = new DataInputStream(new BufferedInputStream(
					new FileInputStream(input), 32 * 1024));
			// prime the buffer with the first record
			io.read(is);
		}

		/**
		 * Returns the next attribute's dataset, or null at end of stream.
		 */
		public AttributeDataset next() throws IOException {
			if (io.EOF)
				return null;

			int attributesNamesId = io.attributesNamesId;

			List<List<FeaturesRecord>> data = new ArrayList<List<FeaturesRecord>>();
			List<FeaturesRecord> curArticle = new ArrayList<FeaturesRecord>();

			// counts articles seen beyond the MAX_ARTICLES_PER_ATTRIBUTE cap
			int art = 0;
			while (!io.EOF) {
				if (curArticle.isEmpty()
						|| curArticle.get(0).articleId == io.articleId) {
					// same article: keep collecting sentences up to the cap
					if (curArticle.size() < MAX_SENTENCES_PER_ARTICLE)
						curArticle.add(io.clone());
				} else {
					// do something with existing curArticle and then create
					// new curArticle
					if (!hasAnnotation(curArticle)) {
						// unlabeled article: discard it and restart from the
						// current record
						curArticle.clear();
						curArticle.add(io.clone());
						attributesNamesId = io.attributesNamesId;
						// THERE WAS A BUG IN HERE:
						io.read(is); // / THIS WAS MISSING

						continue;
					}

					// max number of articles per attribute
					if (data.size() < MAX_ARTICLES_PER_ATTRIBUTE) {
						data.add(curArticle);
					} else {
						art++;
						// pick one element to swap with last
						// NOTE(review): this looks like reservoir-style
						// replacement, but p is drawn from the overflow count
						// only, so the sampling is not uniform over all
						// articles — confirm whether that is intended.
						int p = random.nextInt(art);
						if (p < MAX_ARTICLES_PER_ATTRIBUTE)
							data.set(p, curArticle);
					}

					if (io.attributesNamesId != attributesNamesId) {
						// a new attribute starts here: emit what we collected
						art = 0;
						return new AttributeDataset(attributesNamesId + "",
								data);
					}

					curArticle = new ArrayList<FeaturesRecord>();
					curArticle.add(io.clone());
				}
				io.read(is);
			}
			// EOF: emit the final attribute if anything was flushed.
			// NOTE(review): the last in-progress curArticle is dropped at EOF
			// unless it was already added to data — confirm this is intended.
			if (!data.isEmpty())
				return new AttributeDataset(attributesNamesId + "", data);
			return null;
		}

		public void close() throws IOException {
			is.close();
		}
	}

	/**
	 * Returns true if any sentence in the article carries at least one
	 * annotated match span (matchPos non-empty).
	 */
	private static boolean hasAnnotation(List<FeaturesRecord> snts) {
		for (FeaturesRecord sentence : snts) {
			if (sentence.matchPos.length > 0) {
				return true;
			}
		}
		return false;
	}

	/**
	 * Loads the first-pass feature-id table from the given delimited file:
	 * resets RunSecondPass.featureIds/secondFeatureIds and appends column 1 of
	 * every line to featureIds (feature id == list position).
	 *
	 * @param file path of the delimited feature-id file
	 * @throws IOException if the file cannot be opened or read
	 */
	private static void readFeatureIds(String file) throws IOException {
		RunSecondPass.featureIds = new ArrayList<String>();
		RunSecondPass.secondFeatureIds = new HashMap<String, Integer>();
		DelimitedReader r = new DelimitedReader(file);
		try {
			String[] t = null;
			while ((t = r.read()) != null) {
				RunSecondPass.featureIds.add(t[1]);
			}
		} finally {
			// BUGFIX: the reader used to stay open if read() threw mid-file.
			r.close();
		}
	}

	/**
	 * Returns the dense integer id for a feature name, assigning the next
	 * free id on first sight.
	 *
	 * @param feature feature name to look up
	 * @param featuresMap name -> id index; updated on a miss
	 * @param features id -> name list; appended on a miss so that
	 *            features.get(id) yields the name
	 * @return the stable id of {@code feature}
	 */
	static int getSecondFeatureId(String feature,
			Map<String, Integer> featuresMap, List<String> features) {
		Integer existing = featuresMap.get(feature);
		if (existing == null) {
			// first time we see this feature: its id is the next list slot
			existing = Integer.valueOf(features.size());
			features.add(feature);
			featuresMap.put(feature, existing);
		}
		return existing;
	}

	/**
	 * Shuffles the merged training articles with the seeded RNG, truncates
	 * them to at most maximum_train entries, and runs the configured second
	 * pass on the result.
	 */
	private static void processSamples(AttributeDataset trainData, int maximum_train, List<AttributeDataset> testset)
			throws IOException {
		Collections.shuffle(trainData.data, random);
		// drop everything past the cap in one operation
		if (trainData.data.size() > maximum_train) {
			trainData.data.subList(maximum_train, trainData.data.size()).clear();
		}
		process(trainData, features2ToCompute[0], testset);
	}

	/**
	 * Runs one second-pass configuration: builds the extra feature generators
	 * implied by mode (seed lexicons, web lexicons, gaussian predictors) from
	 * the training data, then trains and tests via createFeaturesSecondPass.
	 */
	private static void process(AttributeDataset trainSamples, Feature2 mode,List<AttributeDataset>testset)
			throws IOException {

		// create lexicons
		List<LexiconFeature> lexicons = new ArrayList<LexiconFeature>();
		{
			// add seed lexicons
			if (mode == Feature2.ALLVALUES || mode == Feature2.WEB) {
				seeds(trainSamples.data, lexicons, "_seedlistFirst", 1, 0);
				// seeds(trainSamples, lexicons, "_seedlistLast", 2, 0);
				// seeds(trainSamples, lexicons, "_seedlistFirsWord", 1, 1);
			}

			if (mode == Feature2.WEB) {
				// expand the first seed lexicon (index 0) with web-derived lists
				weblists(lexicons.get(0), lexicons, "_weblistFirst");
				// weblists(lexicons.get(1), lexicons, "_weblistLast");
				// weblists(lexicons.get(2), lexicons, "_weblistFirstWord");
			}
			createIndexes(lexicons);
		}

		// create gaussian predictors
		List<NumdistFeature> numdists = new ArrayList<NumdistFeature>();
		{
			if (mode == Feature2.GAUSSIAN) {
				gaussians(trainSamples.data, numdists, "_gaussianFirst", 1, 0);
			}
			createNumdistIndexes(numdists);
		}

		createFeaturesSecondPass(trainSamples, lexicons, numdists,testset);
	}

	/**
	 * Builds second-pass feature strings for the training articles, trains a
	 * CRF on them, then scores each dataset in testset and appends one
	 * tab-separated result line per test dataset to the class-level writer bw.
	 *
	 * @param train merged training articles; only records flagged
	 *            useForLearning are emitted
	 * @param lexicons lexicon features added on top of the first-pass features
	 * @param numdists numeric-distribution features added likewise
	 * @param testset datasets to evaluate the trained model on
	 * @throws IOException on write failure to bw
	 */
	private static void createFeaturesSecondPass(AttributeDataset train,
			List<LexiconFeature> lexicons, List<NumdistFeature> numdists, List<AttributeDataset> testset)
			throws IOException {

		CRFTrainer ct = new CRFTrainer();

		boolean[] featuresUsed = new boolean[RunSecondPass.featureIds.size()];
		StringBuilder sbTrainData = new StringBuilder();

		for (int ar = 0; ar < train.data.size(); ar++) {
			for (FeaturesRecord o : train.data.get(ar)) {
				if (!o.useForLearning)
					continue;
				sbTrainData.append("[ articleId:" + ar + " ]\n"); // important
																	// for
																	// scoring
				List<Integer>[] secondPassFeatures = copy(o.features);
				// add lexicon features; the lexicon chunk is selected by
				// rotating over the article index (ar % NUM_CROSS_LEXICONS)
				for (LexiconFeature lf : lexicons)
					lexiconFeatures(o, false, lf.lexicons.chunkTrain.get(ar
							% NUM_CROSS_LEXICONS).index, secondPassFeatures,
							lf.featureId);
				for (NumdistFeature nf : numdists)
					numdistFeatures(o, false, nf.numdists.chunkTrain.get(ar
							% NUM_CROSS_LEXICONS).index, secondPassFeatures,
							nf.featureId);

				writeSecondPassFeatures(o, secondPassFeatures, sbTrainData,
						featuresUsed);
			}
		}

		String usedFeatures = usedFeatures(featuresUsed);
		String lbls = "0\t0\n1\t1\n";

		Reader trData = new StringReader(sbTrainData.toString());
		Reader trFeatures = new StringReader(usedFeatures);
		Reader trLabels = new StringReader(lbls);

		System.out.println("TRAINING " + train.name);
		ct.train(trData, trLabels, trFeatures);

		// BUGFIX: removed an AttributeDatasetIterator that re-opened the
		// reduced feature file here but was never read from — it wasted I/O
		// and failed spuriously when the file was absent. Test data comes
		// exclusively from the testset parameter.
		System.out.println("Start testing " + (new java.util.Date()));
		for (AttributeDataset testData : testset) {
			StringBuilder sbTestData = new StringBuilder();
			for (int ar = 0; ar < testData.data.size(); ar++) {
				for (FeaturesRecord o : testData.data.get(ar)) {
					sbTestData.append("[ articleId:" + ar + " ]\n");
					List<Integer>[] secondPassFeatures = copy(o.features);
					// at test time use the lexicons built from the full
					// training data (train.index), not a cross chunk
					for (LexiconFeature lf : lexicons)
						lexiconFeatures(o, false, lf.lexicons.train.index,
								secondPassFeatures, lf.featureId);
					for (NumdistFeature nf : numdists)
						numdistFeatures(o, false, nf.numdists.train.index,
								secondPassFeatures, nf.featureId);
					writeSecondPassFeatures(o, secondPassFeatures, sbTestData,
							featuresUsed);
				}
			}

			Reader teData = new StringReader(sbTestData.toString());
			Reader teFeatures = new StringReader(usedFeatures);
			Reader teLabels = new StringReader(lbls);

			ct.test(teData, teLabels, teFeatures);
			double[] re = ct.getCrossValidationResults();

			// result line: trainName, testName, #articles, then the three
			// cross-validation scores reported by the trainer
			String r = train.name + "\t" + testData.name + "\t"
					+ testData.data.size() + "\t" + re[0] + "\t" + re[1] + "\t"
					+ re[2];
			bw.append(r + "\n");
			ct.resetTest();
		}
	}

	/*
	 * private static void writeFeatures(String file, boolean[] featuresUsed)
	 * throws IOException { DelimitedWriter w = new DelimitedWriter(file); for
	 * (int i=0; i < featureIds.size(); i++) if (featuresUsed[i]) w.write(i +
	 * "", featureIds.get(i)); w.close(); }
	 * 
	 * private static void writeString(String file, String data) throws
	 * IOException { BufferedWriter bw = new BufferedWriter(new
	 * OutputStreamWriter(new FileOutputStream(file), "utf-8"));
	 * bw.append(data); bw.close(); }
	 */

	/**
	 * Emits one sentence in CRF text format: for every token a line of
	 * "token label id:1 id:1 ..." (feature ids sorted ascending), then a blank
	 * line terminating the sentence. Each emitted feature id is flagged in
	 * featuresUsed.
	 */
	private static void writeSecondPassFeatures(FeaturesRecord o,
			List<Integer>[] fts, StringBuilder sb, boolean[] featuresUsed) {
		// token-level gold labels: 1 inside an annotated match span, else 0
		int[] labels = new int[o.tokens.length];
		for (byte[] span : o.matchPos) {
			for (int pos = span[0]; pos < span[1]; pos++) {
				labels[pos] = 1;
			}
		}

		for (int t = 0; t < o.tokens.length; t++) {
			sb.append(o.tokens[t]).append(' ').append(labels[t]);
			Collections.sort(fts[t]);
			for (int featureId : fts[t]) {
				sb.append(' ').append(featureId).append(":1");
				featuresUsed[featureId] = true;
			}
			sb.append('\n');
		}
		sb.append('\n');
	}

	/**
	 * Renders the subset of feature ids flagged in featuresUsed as
	 * tab-separated "id&lt;TAB&gt;name" lines, in ascending id order.
	 */
	private static String usedFeatures(boolean[] featuresUsed) {
		StringBuilder out = new StringBuilder();
		int total = RunSecondPass.featureIds.size();
		for (int id = 0; id < total; id++) {
			if (!featuresUsed[id]) {
				continue;
			}
			out.append(id).append('\t').append(RunSecondPass.featureIds.get(id)).append('\n');
		}
		return out.toString();
	}

	/**
	 * Deep-copies an array of per-token feature-id lists so the second pass
	 * can append lexicon/numdist features without mutating the originals.
	 */
	@SuppressWarnings("unchecked")
	private static List<Integer>[] copy(List<Integer>[] fts) {
		List<Integer>[] duplicate = new List[fts.length];
		int slot = 0;
		for (List<Integer> tokenFeatures : fts) {
			duplicate[slot++] = new ArrayList<Integer>(tokenFeatures);
		}
		return duplicate;
	}

	/**
	 * Running-mean accumulator: add() folds in a sample, avg() returns the
	 * mean of everything added so far.
	 */
	static class Avg {
		double sum = 0;
		double count = 0;

		/** Folds one sample into the running totals. */
		void add(double d) {
			this.sum += d;
			this.count += 1;
		}

		/** Mean of all samples so far (NaN when nothing has been added). */
		double avg() {
			return this.sum / this.count;
		}
	}
}
