package de.jaberu.hadoop.example;

import java.io.IOException;
import java.util.Iterator;
import java.util.Locale;
import java.util.StringTokenizer;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;


/**
 * Classic word-count example using the old ({@code org.apache.hadoop.mapred})
 * MapReduce API: the mapper tokenizes each input line and emits
 * {@code (word, 1)} pairs; the reducer sums the counts for each unique word.
 */
public class WordCount {

	/**
	 * Mapper. The default {@link TextInputFormat} supplies each record as a
	 * byte offset into the input file ({@link LongWritable}) for the key and
	 * the line content ({@link Text}) for the value. For every
	 * whitespace-separated token on the line we emit {@code (token, 1)}.
	 * For input with any other structure, Hadoop lets you plug in your own
	 * InputFormat instead.
	 */
	public static class Map extends MapReduceBase implements
			Mapper<LongWritable, Text, Text, IntWritable> {
		// Reused output objects: map() is called once per input line,
		// potentially millions of times, so we avoid per-call allocation.
		private final static IntWritable one = new IntWritable(1);
		private final Text word = new Text();

		/*
		 * Called once per input line; map tasks run in parallel over
		 * subsets (splits) of the input files.
		 */
		@Override
		public void map(LongWritable key, Text value,
				OutputCollector<Text, IntWritable> output, Reporter reporter)
				throws IOException {
			// value holds one entire line of the input file
			StringTokenizer tokenizer = new StringTokenizer(value.toString());
			while (tokenizer.hasMoreTokens()) {
				// Locale.ROOT keeps lower-casing deterministic regardless of
				// the JVM's default locale (e.g. the Turkish dotless-i issue).
				word.set(tokenizer.nextToken().toLowerCase(Locale.ROOT));
				// Emit the map output pair (Text word, IntWritable 1).
				output.collect(word, one);
			}
		}
	}

	/**
	 * Reducer (also used as combiner). Called once per unique map output key
	 * with an iterator over all values emitted for that key; the partial
	 * counts are summed and a single {@code (word, total)} line is emitted.
	 * The first two type arguments match the map output types, the last two
	 * specify the reduce output types.
	 */
	public static class Reduce extends MapReduceBase implements
			Reducer<Text, IntWritable, Text, IntWritable> {
		@Override
		public void reduce(Text key, Iterator<IntWritable> values,
				OutputCollector<Text, IntWritable> output, Reporter reporter)
				throws IOException {
			int sum = 0;
			while (values.hasNext()) {
				sum += values.next().get();
			}
			output.collect(key, new IntWritable(sum));
		}
	}

	/**
	 * Configures and submits the word-count job.
	 *
	 * @param args {@code args[0]} = input path, {@code args[1]} = output path
	 * @throws Exception if job submission or execution fails
	 */
	public static void main(String[] args) throws Exception {
		// Fail fast with a usage hint instead of an
		// ArrayIndexOutOfBoundsException on missing arguments.
		if (args.length < 2) {
			System.err.println("Usage: WordCount <input path> <output path>");
			System.exit(2);
		}

		JobConf conf = new JobConf(WordCount.class);
		conf.setJobName("wordcount");

		conf.setOutputKeyClass(Text.class);
		conf.setOutputValueClass(IntWritable.class);

		conf.setMapperClass(Map.class);
		// Summing is associative and commutative, so the reducer doubles as
		// a combiner to shrink the data volume shuffled between nodes.
		conf.setCombinerClass(Reduce.class);
		conf.setReducerClass(Reduce.class);
		conf.setInputFormat(TextInputFormat.class);
		conf.setOutputFormat(TextOutputFormat.class);

		FileInputFormat.setInputPaths(conf, new Path(args[0]));
		FileOutputFormat.setOutputPath(conf, new Path(args[1]));

		/*
		 * JobClient checks the input and output specifications, computes the
		 * InputSplits, sets up DistributedCache accounting if necessary,
		 * copies the job's jar and configuration to the map-reduce system
		 * directory on the distributed file system, submits the job to the
		 * JobTracker and monitors its status.
		 */
		JobClient.runJob(conf);
	}
}