package twitterhadoop.app.test;

import org.apache.commons.lang.ArrayUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import cache.io.CacheInputFormat;

import twitterhadoop.hadoop.io.TwitterInputFormat;

/**
 * Driver for a word-count MapReduce job over Twitter data.
 *
 * <p>Configures a job that reads tweets through {@link CacheInputFormat}
 * (delegating to {@link TwitterInputFormat} under the cache key "twitter"),
 * counts words with {@code TwitterWordCountMapper}/{@code TwitterWordCountReducer},
 * and writes results to {@code /tmp/output} on the default FileSystem,
 * deleting any previous output first. Exits 0 on success, 1 on failure.
 */
public class TwitterWordCount {
	public static void main(String[] args) {
		try {
			// NOTE(review): new Job() is deprecated in Hadoop 2.x in favor of
			// Job.getInstance(); kept as-is since the target Hadoop version is unknown.
			Job job = new Job();
			job.setJarByClass(TwitterWordCount.class);
			job.setJobName("Twitter Word-Count " + ArrayUtils.toString(args));

			// Query parameters for the Twitter input source.
			TwitterInputFormat.setQuery(job, "obama", "2013-07-16", 1);
			TwitterInputFormat.setLimit(job, 100);

			// Read through the caching layer, delegating actual input to
			// TwitterInputFormat under the cache namespace "twitter".
			job.setInputFormatClass(CacheInputFormat.class);
			CacheInputFormat.setDelegateInputFormatData(job, TwitterInputFormat.class, "twitter");

			job.getConfiguration().setInt("mapred.map.tasks", 3);

			// Hadoop refuses to run if the output path already exists, so
			// remove any leftovers from a previous run.
			FileSystem fs = FileSystem.get(job.getConfiguration());
			Path outputPath = new Path("/tmp/output");
			if (fs.exists(outputPath)) {
				fs.delete(outputPath, true);
			}
			FileOutputFormat.setOutputPath(job, outputPath);

			job.setMapperClass(TwitterWordCountMapper.class);
			job.setMapOutputKeyClass(Text.class);
			job.setMapOutputValueClass(IntWritable.class);

			job.setReducerClass(TwitterWordCountReducer.class);
			job.setOutputKeyClass(Text.class);
			job.setOutputValueClass(Text.class);

			// Time the job run and report elapsed wall-clock time
			// (previously the timestamps were captured but never used).
			long startTime = System.currentTimeMillis();
			boolean success = job.waitForCompletion(true);
			long endTime = System.currentTimeMillis();
			System.out.println("Job " + (success ? "succeeded" : "failed")
					+ " in " + (endTime - startTime) + " ms");
			System.exit(success ? 0 : 1);
		} catch (Exception e) {
			// Fix: previously main fell through after the stack trace and the
			// JVM exited 0, signalling success to any calling script.
			e.printStackTrace();
			System.exit(1);
		}
	}
}
