package mapreduce;

import static util.Preconditions.checkNotNull;

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

import util.CharacterSet;
import util.Pair;
import associationrules.AssociationRulesMapReduce;

/**
 * Hadoop mapper that feeds each input line into {@link AssociationRulesMapReduce#map(String)}
 * and emits every resulting {@code (CharacterSet, count)} pair.
 *
 * <p>Not thread-safe; Hadoop instantiates one mapper per task, so no synchronization is needed.
 */
public class AssociationRulesMapper extends MapReduceBase implements Mapper<LongWritable, Text, CharacterSet, LongWritable> {

	/** Delegate holding the actual map logic; initialized in {@link #configure(JobConf)}. */
	private AssociationRulesMapReduce mapReduce;

	/**
	 * Builds the {@link AssociationRulesMapReduce} delegate from job configuration.
	 *
	 * <p>Expects the job configuration to define {@code MIN_OCCURRENCES}, {@code SIZE} and
	 * {@code SLICE_SIZE} as parseable numbers; throws {@link NumberFormatException} otherwise
	 * (a missing key yields a null string and fails the same way, aborting the task early).
	 *
	 * @param configuration the job configuration supplied by the framework
	 */
	@Override
	public void configure(JobConf configuration) {
		mapReduce = new AssociationRulesMapReduce(Long.parseLong(configuration.get("MIN_OCCURRENCES")), 
				Integer.parseInt(configuration.get("SIZE")), Integer.parseInt(configuration.get("SLICE_SIZE")));
	}

	/** No resources are held by this mapper, so there is nothing to release. */
	@Override
	public void close() throws IOException {
		// Intentionally a no-op: the delegate holds no closeable state.
	}

	/**
	 * Maps a single input line to zero or more {@code (CharacterSet, count)} output pairs.
	 *
	 * @param key      byte offset of the line within the input split (unused beyond null check)
	 * @param value    the input line
	 * @param output   collector receiving the emitted pairs
	 * @param reporter progress reporter (unused beyond null check)
	 * @throws IOException if the collector fails to write a pair
	 */
	@Override
	public void map(LongWritable key, Text value,
			OutputCollector<CharacterSet, LongWritable> output, Reporter reporter)
					throws IOException {
		checkNotNull(key, "key must not be null");
		checkNotNull(value, "value must not be null");
		checkNotNull(output, "outputCollector must not be null");
		checkNotNull(reporter, "reporter must not be null");

		// BUGFIX: the previous code used new String(value.getBytes()), which is wrong twice over:
		// Text.getBytes() returns the reused backing array whose valid region is only
		// [0, getLength()) — stale bytes from a previous, longer record would be appended —
		// and new String(byte[]) decodes with the platform charset while Text is UTF-8.
		// Text.toString() decodes exactly getLength() bytes as UTF-8.
		List<Pair<CharacterSet, Long>> results = mapReduce.map(value.toString());

		for (Pair<CharacterSet, Long> result : results) {
			output.collect(result.getFirst(), new LongWritable(result.getSecond()));
		}
	}
}
