/**
 * 
 */
//package mapreduce4j;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.regex.Pattern;

import mapreduce4j.Job;
import mapreduce4j.TextInputFormat;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

/**
 * Example word-count MapReduce job: tokenizes each input line on single
 * spaces, sums the occurrences of each word, and writes {@code "word count"}
 * lines to an output file supplied on the command line.
 *
 * <p>Note: the driver wires up a MapReduce4J {@code Job}, not a real Hadoop
 * job, even though the mapper/reducer extend the Hadoop base classes.
 *
 * @author tim
 */
public class WordCount
{

  /**
   * Splits each input line on single spaces and emits a (word, 1) pair
   * for every non-empty token.
   */
  public static class WordTokenizer extends Mapper<LongWritable, Text, Text, IntWritable>
  {
    // Compiled once and shared; String.split would recompile on every call.
    static final Pattern delimiter = Pattern.compile(" ");

    // Reused output objects — Hadoop mappers conventionally avoid allocating
    // a fresh Writable for every record.
    private static final IntWritable ONE = new IntWritable(1);
    private final Text word = new Text();

    @Override
    protected void setup(Context context) throws IOException,
        InterruptedException
    {
      super.setup(context);
      // just to illustrate getting something out of the context
      String name = context.getConfiguration().get("name");
    }

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException,
        InterruptedException
    {
      context.setStatus("Key[" + key.get() + "], Value[" + value.toString() + "]");
      String[] parts = delimiter.split(value.toString());
      for (String s : parts)
      {
        // Consecutive delimiters yield empty tokens; don't count them as words.
        if (!s.isEmpty())
        {
          word.set(s);
          context.write(word, ONE);
        }
      }
    }
  }

  /**
   * Sums the per-word counts emitted by the mapper and appends
   * {@code "word total"} lines to the shared output writer.
   */
  public static class SumWordCount extends Reducer<Text, IntWritable, Text, IntWritable>
  {
    /** Output destination; assigned by {@link WordCount#main} before the job runs. */
    public static BufferedWriter _writer;

    // Currently unused; retained so existing references keep compiling.
    static String _outputFilename = "";

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException,
        InterruptedException
    {
      int total = 0;
      for (IntWritable v : values)
      {
        total += v.get();
      }
      _writer.write(key + " " + total + "\n");
      // Flush per key so partial output survives an abnormal shutdown.
      _writer.flush();
    }

    @Override
    protected void setup(Context context) throws IOException,
        InterruptedException
    {
      // just for illustration that the setup is called once
      super.setup(context);
    }

    @Override
    protected void cleanup(Context context) throws IOException,
        InterruptedException
    {
      // just for illustration that a cleanup is called
      super.cleanup(context);
    }

  }


  /**
   * Opens a buffered writer over {@code filename}, truncating any existing file.
   *
   * @param filename path of the output file
   * @return the writer, or {@code null} if the file could not be opened
   *         (callers must check for {@code null})
   */
  public static BufferedWriter getWriter(String filename)
  {
    try
    {
      return new BufferedWriter(new FileWriter(filename));
    }
    catch (IOException e)
    {
      // Best-effort: report the failure and let the caller decide what to do.
      e.printStackTrace();
      return null;
    }
  }

  /**
   * Runs the word-count job.
   *
   * @param args args[0] = input text file, args[1] = output file
   * @throws Exception if the job fails
   */
  public static void main(String[] args) throws Exception
  {
    if (args.length < 2)
    {
      System.out.println("Usage: WordCount <in> <out>");
      return;
    }
    System.out.println("Starting");
    // Logger.global is deprecated; getGlobal() is the supported accessor.
    Logger.getGlobal().setLevel(Level.OFF);
    Configuration conf = new Configuration();

    // the following is NOT a Hadoop job but a MapReduce4J job
    // this would need replaced to run on a Hadoop cluster
    Job job = new Job(conf, "WordCount");
    job.setMapperClass(WordTokenizer.class);
    job.setReducerClass(SumWordCount.class);

    File inputFile = new File(args[0]);
    if (!inputFile.exists())
    {
      // Previously this only warned and then ran the job anyway; fail fast.
      System.out.println("Input file: " + args[0] + " does not exist");
      return;
    }

    SumWordCount._writer = getWriter(args[1]);
    if (SumWordCount._writer == null)
    {
      // getWriter already printed the underlying IOException.
      System.out.println("Could not open output file: " + args[1]);
      return;
    }

    job.addInput(inputFile);
    job.setInputFormat(TextInputFormat.class);

    try
    {
      job.waitForCompletion(true);
    }
    finally
    {
      // Close (and flush) the output even if the job throws.
      SumWordCount._writer.close();
    }
  }
}
