package learning.data;

import java.io.BufferedWriter;
import java.io.FileOutputStream;
import java.io.OutputStreamWriter;
import java.util.ArrayList;
import java.util.List;

import learning.data.document.SequenceDocument;
import learning.util.DynamicIntArray;
import learning.util.FileOperations;

/**
 * Rewrites a sequence dataset so that documents containing the minority class
 * label and the remaining (majority) documents appear in a configured ratio,
 * either by replicating minority documents (OVER_SAMPLE) or by keeping only a
 * subset of majority documents (UNDER_SAMPLE). Writes the rebalanced dataset
 * to {@code output + ".data"} and copies the .fts/.labels side files.
 */
public class BalancedClassResampler {
	
	// careful! don't do cross-validation on an over-sampled dataset!
	
	public enum Strategy { UNDER_SAMPLE, OVER_SAMPLE }
	
	// the minority class should get a weight between 0.5 and 0.9
	static int minorityClassLabel = 1;
	static float minorityClassWeight = .5f;	
	static Strategy rebalanceStrategy = Strategy.OVER_SAMPLE;
	
	static String infoboxClass = "comedian";
	static String attribute = "nationality";
	
	static String input = "O:/unix/projects/pardosa/data10/raphaelh/learning/" + infoboxClass + "_" + attribute;
	static String output = "O:/unix/projects/pardosa/data10/raphaelh/learning/" + infoboxClass + "_" + attribute + "_rebalanced";
	
	public static void main(String[] args) throws Exception {
		
		Dataset<SequenceDocument> data = SequenceDataReader.read(input);
		List<SequenceDocument> newData = new ArrayList<SequenceDocument>();
		
		DynamicIntArray majorityExamples = new DynamicIntArray();
		DynamicIntArray minorityExamples = new DynamicIntArray();
		
		// Partition: a document counts as a minority example if ANY of its
		// token labels equals minorityClassLabel.
		for (int d = 0; d < data.numDocs(); d++) {
			SequenceDocument doc = data.doc(d);
			boolean isMinority = false;
			for (int i = 0; i < doc.labels.length; i++)
				if (doc.labels[i] == minorityClassLabel)
					isMinority = true;
			if (isMinority)
				minorityExamples.append(d);
			else
				majorityExamples.append(d);
		}
		
		if (rebalanceStrategy == Strategy.OVER_SAMPLE) {
			
			// Replicate minority docs until minority : majority = w : (1-w).
			int minorityTargetSize = 
				(int)(majorityExamples.length() * (minorityClassWeight / (1 - minorityClassWeight)));
			
			// Guard against modulo-by-zero / crash when there is nothing to replicate.
			if (minorityExamples.length() == 0 && minorityTargetSize > 0)
				throw new IllegalStateException("no minority examples to over-sample");
			
			int i = 0;
			while (newData.size() < minorityTargetSize) {
				// round-robin replication; maybe add random sampling here in the future
				newData.add(data.doc(minorityExamples.get(i)));
				i = (i + 1) % minorityExamples.length();
			}
			
			for (int j = 0; j < majorityExamples.length(); j++)
				newData.add(data.doc(majorityExamples.get(j)));
			
		}
		
		if (rebalanceStrategy == Strategy.UNDER_SAMPLE) {
			
			// Keep only enough majority docs so that minority : majority = w : (1-w).
			// BUG FIX: the original expression (1-w / w) parsed as 1 - (w/w) == 0
			// due to operator precedence, so under-sampling dropped ALL majority
			// examples. The intended formula (mirroring the over-sample branch)
			// is (1-w) / w.
			int majorityTargetSize = 
				(int)(minorityExamples.length() * ((1 - minorityClassWeight) / minorityClassWeight));
			
			// Guard against modulo-by-zero / crash when there is nothing to sample from.
			if (majorityExamples.length() == 0 && majorityTargetSize > 0)
				throw new IllegalStateException("no majority examples to under-sample from");
			
			int i = 0;
			while (newData.size() < majorityTargetSize) {
				// round-robin selection; maybe add random sampling here in the future
				newData.add(data.doc(majorityExamples.get(i)));
				i = (i + 1) % majorityExamples.length();  // being paranoid here
			}
			
			for (int j = 0; j < minorityExamples.length(); j++)
				newData.add(data.doc(minorityExamples.get(j)));			
		}
		
		
		// Write "token label id:val id:val ..." lines, blank line between docs —
		// the same format the reader side consumes. try-with-resources ensures
		// the writer is closed (and flushed) even if a write throws.
		try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter
				(new FileOutputStream(output + ".data"), "utf-8"))) {
			
			for (SequenceDocument doc : newData) {
	
				for (int i = 0; i < doc.tokens.length; i++) {
					bw.write(doc.tokens[i] + " " + doc.labels[i]);
					for (int j = 0; j < doc.features[i].num; j++)
						bw.write(" " + doc.features[i].ids[j] + ":" + doc.features[i].vals[j]);
					bw.write("\n");
				}
				bw.write("\n");
			}
		}
		
		// Feature and label dictionaries are unchanged by resampling; copy as-is.
		FileOperations.copy(input + ".fts", output + ".fts");
		FileOperations.copy(input + ".labels", output + ".labels");
	}
}
