package learning.data;

import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import learning.data.document.InstanceDocument;
import learning.data.document.SequenceDocument;

public class BalancedResampler {

	// Takes a dataset (view) and creates a new resampled dataset that balances
	// the number of examples from the minority and majority classes.

	/** Rebalancing strategy: drop majority examples or duplicate minority examples. */
	public enum Strategy { UNDER_SAMPLE, OVER_SAMPLE }

	// Label value that identifies the minority class.
	static int minorityClassLabel = 1;
	// Target fraction of the resampled set occupied by the minority class;
	// must lie in (0, 1). E.g. .01 keeps roughly 1 minority example per 99
	// majority examples.
	static float minorityClassWeight = .01f;
	static Strategy rebalanceStrategy = Strategy.UNDER_SAMPLE;

	static final DecimalFormat df = new DecimalFormat("0.00");
	// Hard cap on the resampled dataset size (applied by UNDER_SAMPLE only).
	// NOTE(review): this field is static, so every instance shares the value
	// passed to the most recent constructor call; kept static for backward
	// compatibility with existing callers.
	static int maxSize;

	/**
	 * @param maxSize upper bound on the size of datasets produced by
	 *     {@link #resample} / {@link #resampleId} under UNDER_SAMPLE
	 */
	public BalancedResampler(int maxSize) {
		// Explicit class qualifier: the original wrote "this.maxSize", hiding
		// the fact that a shared static field is being mutated.
		BalancedResampler.maxSize = maxSize;
	}

	/**
	 * Rebalances a sequence-labeled dataset. A sequence counts as a minority
	 * example if ANY of its positions carries {@code minorityClassLabel}.
	 *
	 * @param data dataset to resample (not modified)
	 * @return a new, shuffled, rebalanced dataset sharing the input's
	 *     feature/label maps; under UNDER_SAMPLE, returns {@code data}
	 *     unchanged when the majority class is already at or below target
	 */
	public Dataset<SequenceDocument> resample(Dataset<SequenceDocument> data) {

		List<SequenceDocument> newData = new ArrayList<SequenceDocument>();
		List<SequenceDocument> majorityExamples = new ArrayList<SequenceDocument>();
		List<SequenceDocument> minorityExamples = new ArrayList<SequenceDocument>();

		for (SequenceDocument doc : data) {
			boolean isMinority = false;
			for (int i = 0; i < doc.labels.length; i++) {
				if (doc.labels[i] == minorityClassLabel) {
					isMinority = true;
					break;  // one match is enough
				}
			}
			if (isMinority)
				minorityExamples.add(doc);
			else
				majorityExamples.add(doc);
		}

		if (rebalanceStrategy == Strategy.OVER_SAMPLE) {

			// Solve minority/(minority+majority) == minorityClassWeight for
			// the minority count, given the fixed majority count.
			int minorityTargetSize =
				(int)(majorityExamples.size() * (minorityClassWeight / (1 - minorityClassWeight)));

			// BUG FIX: guard the empty-minority case, which previously caused
			// division by zero / out-of-bounds get when the target was > 0.
			int i = 0;
			while (!minorityExamples.isEmpty() && newData.size() < minorityTargetSize) {
				// maybe add random sampling here in the future
				newData.add(minorityExamples.get(i));
				i = (i + 1) % minorityExamples.size();
			}

			newData.addAll(majorityExamples);
		}

		if (rebalanceStrategy == Strategy.UNDER_SAMPLE) {

			int majorityTargetSize =
				(int)(minorityExamples.size() * (1 - minorityClassWeight) / minorityClassWeight);
			majorityTargetSize = Math.min(majorityTargetSize, maxSize - minorityExamples.size());

			Collections.shuffle(majorityExamples);

			// BUG FIX (consistency with resampleId): when the majority class is
			// already at or below the target, the original loop would cycle via
			// the modulo index and DUPLICATE majority examples (and crash on an
			// empty majority list). Mirror resampleId and leave the data as-is.
			if (majorityTargetSize >= majorityExamples.size()) {
				System.out.println("ignoring: " + minorityExamples.size() + ":" + majorityExamples.size() +
						" (" + df.format(minorityExamples.size() / (double)(minorityExamples.size() + majorityExamples.size())) + ")");
				return data;
			}

			System.out.println("resampling: " + minorityExamples.size() + ":" + majorityExamples.size() +
					" (" + df.format(minorityExamples.size() / (double)(minorityExamples.size() + majorityExamples.size())) + ")" +
					" to " + minorityExamples.size() + ":" + majorityTargetSize +
					" (" + df.format(minorityExamples.size() / (double)(minorityExamples.size() + majorityTargetSize)) + ")");

			int i = 0;
			while (newData.size() < majorityTargetSize) {
				// maybe add random sampling here in the future
				newData.add(majorityExamples.get(i));
				i = (i + 1) % majorityExamples.size();
			}

			newData.addAll(minorityExamples);
		}

		Collections.shuffle(newData);

		return new Dataset<SequenceDocument>(newData.toArray(new SequenceDocument[0]),
			data.getFeatureNames(), data.getFeatureIds(), data.getLabelNames(), data.getLabelIds());
	}

	/**
	 * Rebalances an instance-labeled dataset. An instance counts as a minority
	 * example when its single label equals {@code minorityClassLabel}.
	 *
	 * @param data dataset to resample (not modified)
	 * @return a new, shuffled, rebalanced dataset sharing the input's
	 *     feature/label maps; under UNDER_SAMPLE, returns {@code data}
	 *     unchanged when the majority class is already at or below target
	 */
	public Dataset<InstanceDocument> resampleId(Dataset<InstanceDocument> data) {

		List<InstanceDocument> newData = new ArrayList<InstanceDocument>();
		List<InstanceDocument> majorityExamples = new ArrayList<InstanceDocument>();
		List<InstanceDocument> minorityExamples = new ArrayList<InstanceDocument>();

		for (InstanceDocument doc : data) {
			if (doc.label == minorityClassLabel)
				minorityExamples.add(doc);
			else
				majorityExamples.add(doc);
		}

		if (rebalanceStrategy == Strategy.OVER_SAMPLE) {

			// Solve minority/(minority+majority) == minorityClassWeight for
			// the minority count, given the fixed majority count.
			int minorityTargetSize =
				(int)(majorityExamples.size() * (minorityClassWeight / (1 - minorityClassWeight)));

			// BUG FIX: guard the empty-minority case, which previously caused
			// division by zero / out-of-bounds get when the target was > 0.
			int i = 0;
			while (!minorityExamples.isEmpty() && newData.size() < minorityTargetSize) {
				// maybe add random sampling here in the future
				newData.add(minorityExamples.get(i));
				i = (i + 1) % minorityExamples.size();
			}

			newData.addAll(majorityExamples);
		}

		if (rebalanceStrategy == Strategy.UNDER_SAMPLE) {

			int majorityTargetSize =
				(int)(minorityExamples.size() * (1 - minorityClassWeight) / minorityClassWeight);
			majorityTargetSize = Math.min(majorityTargetSize, maxSize - minorityExamples.size());

			Collections.shuffle(majorityExamples);

			// Nothing to drop: the majority class is already at or below the
			// target, so return the input unchanged.
			if (majorityTargetSize >= majorityExamples.size()) {
				System.out.println("ignoring: " + minorityExamples.size() + ":" + majorityExamples.size() +
						" (" + df.format(minorityExamples.size() / (double)(minorityExamples.size() + majorityExamples.size())) + ")");
				return data;
			}

			System.out.println("resampling: " + minorityExamples.size() + ":" + majorityExamples.size() +
					" (" + df.format(minorityExamples.size() / (double)(minorityExamples.size() + majorityExamples.size())) + ")" +
					" to " + minorityExamples.size() + ":" + majorityTargetSize +
					" (" + df.format(minorityExamples.size() / (double)(minorityExamples.size() + majorityTargetSize)) + ")");

			int i = 0;
			while (newData.size() < majorityTargetSize) {
				// maybe add random sampling here in the future
				newData.add(majorityExamples.get(i));
				i = (i + 1) % majorityExamples.size();
			}

			newData.addAll(minorityExamples);
		}

		Collections.shuffle(newData);

		return new Dataset<InstanceDocument>(newData.toArray(new InstanceDocument[0]),
			data.getFeatureNames(), data.getFeatureIds(), data.getLabelNames(), data.getLabelIds());
	}
}
