package wikiextract.nlp.trainingset.x;

import static wikiextract.nlp.trainingset.x.Settings.MAX_ARTICLES_PER_ATTRIBUTE;
import static wikiextract.nlp.trainingset.x.Settings.MAX_SENTENCES_PER_ARTICLE;

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;

import learning.data.SequenceDataReader;
import wikiextract.util.io.DelimitedReader;
import wikiextract.util.io.DelimitedWriter;

/**
 * Standalone tool that shrinks a first-pass training set in two passes:
 * <ol>
 *   <li>Reservoir-samples up to {@code MAX_ARTICLES_PER_ATTRIBUTE} annotated
 *       articles per attribute (keeping at most {@code MAX_SENTENCES_PER_ARTICLE}
 *       sentences of each article) into {@code input1 + ".reduced.tmp"}.</li>
 *   <li>Renumbers the surviving feature ids densely, writing the remapped
 *       records to {@code input1 + ".reduced"} and the id/name table to
 *       {@code input2 + ".reduced"}.</li>
 * </ol>
 * Streams are closed on the success path only; on failure the JVM exits, so no
 * explicit try/finally is used (pre-Java-7 codebase).
 */
public class ReduceTrainingSetSize {
	
	static String dataDir = "/projects/pardosa/s2/raphaelh/data/all";
	//static String tmpDir = "/projects/pardosa/s2/raphaelh/tmp";
	static String tmpDir = "/projects/pardosa/data08/raphaelh/tmp";

	// relative names; rewritten in main() to full paths with the first-pass suffix
	static String input1 = "learn/ft";
	static String input2 = "learn/ftIds";
	
	static Random random;
	
	/**
	 * Entry point. Reads the first-pass feature file, samples articles per
	 * attribute, and compacts the feature-id space.
	 *
	 * @param args parsed by {@link Settings#parse}
	 * @throws Exception on any I/O or parse failure (tool terminates)
	 */
	public static void main(String[] args) throws Exception {
		Settings.parse(args);
		input1 = tmpDir + "/" + input1 + "_" + Settings.firstPassSuffix();
		input2 = tmpDir + "/" + input2 + "_" + Settings.firstPassSuffix();
	
		System.out.println("reducing " + input1);
		// fixed seed so repeated runs draw the same sample
		random = new Random(Settings.RANDOM_SEED);

		SequenceDataReader.learnTransitionFeatures = 
			Settings.useFeature1(Definitions.Feature1.TRANSITIONS);

		// pass 1: per-attribute reservoir sampling of annotated articles
		{
			DataInputStream is = new DataInputStream(new BufferedInputStream
					(new FileInputStream(input1)));
			DataOutputStream os = new DataOutputStream(new BufferedOutputStream
					(new FileOutputStream(input1 + ".reduced.tmp")));
			FeaturesRecord io = new FeaturesRecord();
			
			int attributesNamesId = -1;
			// reservoir of sampled articles for the current attribute
			List<List<FeaturesRecord>> data = new ArrayList<List<FeaturesRecord>>();
			// sentences of the article currently being read
			List<FeaturesRecord> curArticle = new ArrayList<FeaturesRecord>();
			
			// number of annotated articles seen BEYOND the reservoir capacity
			// for the current attribute
			int art = 0;
			io.read(is);
			while (!io.EOF) {
				if (curArticle.isEmpty() || curArticle.get(0).articleId == io.articleId) {
					// still inside the same article; cap sentences per article
					if (curArticle.size() < MAX_SENTENCES_PER_ARTICLE) 
						curArticle.add(io.clone());
				} else {
					// article boundary: decide what to do with the finished
					// curArticle, then start collecting the new one
					if (!hasAnnotation(curArticle)) {
						// unlabeled article: discard it entirely
						curArticle.clear();
						curArticle.add(io.clone());
						continue;
					}
					
					if (io.attributesNamesId == attributesNamesId) {
							
						// max number of articles per attribute
						if (data.size() < MAX_ARTICLES_PER_ATTRIBUTE) { 
							data.add(curArticle);
						} else {
							// Reservoir sampling (Algorithm R): the i-th article,
							// i = MAX_ARTICLES_PER_ATTRIBUTE + art, replaces a
							// uniformly random slot with probability MAX/i.
							// BUGFIX: the bound was random.nextInt(art), so the
							// first overflow article always evicted slot 0 and
							// later articles were kept with the wrong probability.
							art++;
							int p = random.nextInt(MAX_ARTICLES_PER_ATTRIBUTE + art);
							if (p < MAX_ARTICLES_PER_ATTRIBUTE)
								data.set(p, curArticle);
						}
					} else {
						// attribute changed: flush the finished reservoir
						art = 0;
	
						if (attributesNamesId != -1) {
							for (List<FeaturesRecord> a : data) {
								for (FeaturesRecord f : a) {
									f.write(os);
								}
							}
							
							//processSamples(attributesNamesId, data);
							data.clear();
						}
						
						// NOTE(review): io is stored twice here — once in d (seeding
						// the new reservoir) and once in curArticle below — so the
						// first article of each attribute can be written twice, while
						// the last annotated article of the PREVIOUS attribute
						// (curArticle) is silently dropped. Looks unintended; confirm
						// against the first-pass writer before changing.
						List<FeaturesRecord> d = new ArrayList<FeaturesRecord>();
						data.add(d);
						d.add(io.clone());
					}
					curArticle = new ArrayList<FeaturesRecord>();
					curArticle.add(io.clone());
					attributesNamesId = io.attributesNamesId;
				}
				io.read(is);
			}
			// flush the reservoir of the final attribute
			for (List<FeaturesRecord> a : data) {
				for (FeaturesRecord f : a) {
					f.write(os);
				}
			}
	
			is.close();
			os.close();
		}
		
		// pass 2: renumber the surviving feature ids densely (old id -> new id)
		{
			HashMap<Integer,Integer> old2new = new HashMap<Integer,Integer>();
		    
			DataInputStream is = new DataInputStream(new BufferedInputStream
					(new FileInputStream(input1 + ".reduced.tmp")));
			DataOutputStream os = new DataOutputStream(new BufferedOutputStream
					(new FileOutputStream(input1 + ".reduced")));
			FeaturesRecord io = new FeaturesRecord();
			
			io.read(is);
			while (!io.EOF) {
				for (List<Integer> tokenFeatures : io.features) {
					for (int i = 0; i < tokenFeatures.size(); i++) {
						int v = tokenFeatures.get(i);
						Integer n = old2new.get(v);
						if (n == null) {
							// first sighting of this feature: assign next dense id
							n = old2new.size();
							old2new.put(v, n);
						}
						tokenFeatures.set(i, n);
					}
				}	
				io.write(os);
				io.read(is);
			}
			is.close();
			os.close();
			
			// read the old feature-name table (name is column 1 of input2)
			List<String> old = new ArrayList<String>();
		    DelimitedReader r = new DelimitedReader(input2);
		    String[] t = null;
		    while ((t = r.read()) != null) {
		    	old.add(t[1]);
		    }
		    r.close();
		    
		    // write the remapped id/name table, sorted by new id
		    DelimitedWriter w = new DelimitedWriter(input2 + ".reduced");
		    List<Map.Entry<Integer,Integer>> fl = new ArrayList<Map.Entry<Integer,Integer>>();
		    fl.addAll(old2new.entrySet());
		    // BUGFIX: was "a.getValue() - b.getValue()", which can overflow for
		    // large id ranges; compareTo is overflow-safe.
		    Collections.sort(fl, new Comparator<Map.Entry<Integer,Integer>>() { 
		    	public int compare(Map.Entry<Integer,Integer> a, Map.Entry<Integer,Integer> b) {
		    		return a.getValue().compareTo(b.getValue());
		    	} });	    
		    for (Map.Entry<Integer,Integer> e : fl)
		    	w.write(e.getValue() + "", old.get(e.getKey()));
		    w.close();
		}		
	}
	
	/**
	 * Returns true if any sentence of the article carries at least one labeled
	 * match (non-empty {@code matchPos}); such articles are kept for training.
	 *
	 * @param snts the sentences of one article
	 * @return whether the article has any annotation
	 */
	private static boolean hasAnnotation(List<FeaturesRecord> snts) {
		for (FeaturesRecord o : snts)
			if (o.matchPos.length > 0)
				return true;
		return false;
	}
}
