import java.io.IOException;
import java.util.Iterator;
import java.util.StringTokenizer;

import org.apache.hadoop.dfs.DistributedFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;


public class TotalWords {

    public static class TWCMapper extends MapReduceBase implements Mapper {

        /** Single shared key; every per-line count is emitted under it so one reducer totals them. */
        private final static Text DEF_KEY = new Text("words: ");

        /**
         * Emits the number of whitespace-delimited tokens in one input line,
         * keyed under {@link #DEF_KEY}.
         *
         * @param key   input key (ignored; assumed to be the line offset — standard for text input)
         * @param value the line of text to count
         */
        public void map(WritableComparable key, Writable value,
                OutputCollector output, Reporter reporter) throws IOException {

            StringTokenizer tokens = new StringTokenizer(((Text) value).toString());
            // countTokens() reports how many tokens remain — equivalent to
            // walking the tokenizer and tallying each one.
            output.collect(DEF_KEY, new IntWritable(tokens.countTokens()));
        }
    }

    public static class TWCReducer extends MapReduceBase implements Reducer {

        /**
         * Sums all partial word counts received for a key and emits the total.
         * Also registered as the combiner; integer addition is associative and
         * commutative, so pre-combining partial sums is safe.
         */
        @SuppressWarnings("unchecked")
        public void reduce(WritableComparable key, Iterator values,
                OutputCollector output, Reporter reporter) throws IOException {

            int total = 0;
            for (; values.hasNext(); ) {
                total = total + ((IntWritable) values.next()).get();
            }
            output.collect(key, new IntWritable(total));
        }

    }

    /**
     * Runs a MapReduce job that counts the words in {@code dfsInputPath} and
     * returns the total.
     *
     * @param dfsInputPath DFS path of the text input to count
     * @return the total number of whitespace-delimited words, or 0 if the job
     *         produced no readable output
     * @throws IOException if the job or any DFS operation fails
     */
    public static int returnTotalWords (Path dfsInputPath) throws IOException {
        JobConf conf = new JobConf(TotalWords.class);
        DistributedFileSystem fs = ProjectMain.getDFS(conf);
        Path out = new Path ("out");

        conf.setWorkingDirectory(new Path("/"));
        conf.setInputPath(dfsInputPath);
        conf.setOutputPath(out);
        // The job fails if the output directory already exists, so clear it first.
        fs.delete(out);

        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);
        conf.setMapperClass(TotalWords.TWCMapper.class);
        // The reducer doubles as the combiner: summing is associative and
        // commutative, so combining partial sums does not change the result.
        conf.setCombinerClass(TotalWords.TWCReducer.class);
        conf.setReducerClass(TotalWords.TWCReducer.class);
        JobClient.runJob(conf);

        Path resultFile = new Path("out/part-00000");
        if (!fs.exists(resultFile))
        	return 0;
        // The single reduce output line has the form "words: \t<total>":
        // first token is the key, second is the count. Guard both nextToken()
        // calls — an empty input file yields an empty part file, and the
        // original unguarded nextToken() threw NoSuchElementException there.
        StringTokenizer itr = new StringTokenizer (ProjectMain.fileToString
                (resultFile, fs));
        int total = 0;
        if (itr.hasMoreTokens()) {
            itr.nextToken(); // skip the "words:" key token
            if (itr.hasMoreTokens())
                total = Integer.parseInt(itr.nextToken());
        }
        fs.delete(out);
        return total;
    }

    /**
     * Command-line entry point: copies the local file named by {@code args[0]}
     * into DFS under the same relative path, counts its words, prints the
     * total, and removes the DFS copy.
     *
     * @param args args[0] is the local input file path
     * @throws IOException on any DFS or job failure
     */
    public static void main(String[] args) throws IOException {

        // Fail with a usage message instead of an ArrayIndexOutOfBoundsException
        // when no input file is given.
        if (args.length < 1) {
            System.err.println("Usage: TotalWords <input-file>");
            System.exit(1);
        }
        Path dfsInput = new Path (args[0]);
        DistributedFileSystem fs = ProjectMain.getDFS(new JobConf (TotalWords.class));
        fs.setWorkingDirectory(new Path ("/"));
        // Remove any stale DFS copy before uploading the local file.
        fs.delete(dfsInput);
        fs.copyFromLocalFile(dfsInput, dfsInput);
        System.out.println("Total words: " + returnTotalWords(dfsInput));
        fs.delete(dfsInput);
    }
}
