package histogram;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import global.GlobalConsts;

/**
 * Mapper that builds a per-dataset equi-width histogram over the third field of
 * each input tuple. For every well-formed input line it emits one pair
 * {@code (datasetId, joinAttr, binId) -> 1}; the reducer is expected to sum the
 * ones into per-bin counts.
 *
 * <p>The dataset a tuple belongs to is derived from the name of the input file
 * reported by the split: relation 0 by default, relation 1 when the filename
 * matches {@code GlobalConsts.fileNameRelation1}. Each relation may use its own
 * bin width, read from the job configuration in {@link #configure(JobConf)}.
 *
 * <p>Expected input record format: exactly three fields separated by
 * {@code GlobalConsts.fieldSeparator}; field 1 is the join attribute and
 * field 2 is the integer value being histogrammed.
 */
public class HistogramMapper extends MapReduceBase implements Mapper<LongWritable, Text, MyTripleKey, IntWritable> {

	// Every tuple contributes a count of 1; the value never changes, so one
	// shared instance is emitted for all records.
	private final static IntWritable outValue = new IntWritable(1);
	// Bin widths for relation 0 and relation 1, set once per task in configure().
	private int binSizeR0, binSizeR1;

	@Override
	public void configure(JobConf job) {
		binSizeR0 = job.getInt(GlobalConsts.paramNameBinSizeR0, GlobalConsts.defaultBinSizeR0);
		binSizeR1 = job.getInt(GlobalConsts.paramNameBinSizeR1, GlobalConsts.defaultBinSizeR1);
		// Fail fast at task setup rather than with an ArithmeticException
		// (division by zero) on the first record in map().
		if (binSizeR0 <= 0 || binSizeR1 <= 0) {
			throw new IllegalArgumentException(
					"Bin sizes must be positive: binSizeR0=" + binSizeR0 + ", binSizeR1=" + binSizeR1);
		}
	}

	/**
	 * Parses one input tuple and emits {@code (datasetId, joinAttr, binId) -> 1}.
	 *
	 * @param key      byte offset of the line in the input file (unused)
	 * @param value    the raw input line
	 * @param output   collector receiving the histogram key and a count of 1
	 * @param reporter used to recover the input split (and thus the filename)
	 * @throws IOException if the tuple does not have exactly three fields or
	 *                     its third field is not a valid integer
	 */
	@Override
	public void map(LongWritable key, Text value, OutputCollector<MyTripleKey, IntWritable> output, Reporter reporter) throws IOException {
		// The source relation is identified by the file the split came from.
		FileSplit fileSplit = (FileSplit) reporter.getInputSplit();
		String filename = fileSplit.getPath().getName();
		int datasetId = GlobalConsts.dataSetId0;
		int binSize = binSizeR0;

		if (filename.equalsIgnoreCase(GlobalConsts.fileNameRelation1)) {
			datasetId = GlobalConsts.dataSetId1;
			binSize = binSizeR1;
		}

		String tuple = value.toString();
		String[] fields = tuple.split(GlobalConsts.fieldSeparator);

		if (fields.length != 3) {
			throw new IOException("Expecting 3 fields in " + datasetId + " dataset, tuple: " + tuple);
		}

		String joinAttr = fields[1];
		int binId;
		try {
			// Equi-width binning: integer division maps the value to its bucket.
			binId = Integer.parseInt(fields[2]) / binSize;
		} catch (NumberFormatException e) {
			// Surface malformed records with context, consistent with the
			// field-count check above, instead of an opaque unchecked exception.
			throw new IOException("Non-integer bin field in " + datasetId + " dataset, tuple: " + tuple, e);
		}

		// Local key: a fresh key per record is required anyway because the
		// framework may buffer emitted keys; no state needs to live on the class.
		output.collect(new MyTripleKey(datasetId, joinAttr, binId), outValue);
	}
}
