/**
 * 
 */
package org.kang.myhadoop;

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * 
 * @author yanglk <br>
 *         <a href="mailto:yanglk2010@sina.com">yanglk2010@sina.com</a>
 * Created: 2017-06-07 16:12:57
 * @since 1.0
 * @version 1.0
 */
public class WordCount extends Configured implements Tool{
	private static final String basePath = "d:/tmp/hadoop/";
	private static final String inputFile = basePath + "input.txt";
	private static final String outputFile = basePath + "output.txt";
	
	public static void main(String[] args) throws Exception {
		ToolRunner.run(new WordCount(), null);
	}

	public int run(String[] paramArrayOfString) throws Exception {
		Job job = new Job();
		job.setJarByClass(WordCount.class);
		job.setJobName("WordCounter");
		FileInputFormat.addInputPath(job, new Path(inputFile));//设置输入文件
		FileOutputFormat.setOutputPath(job, new Path(outputFile));//设置输出文件
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(IntWritable.class);
		job.setOutputFormatClass(TextOutputFormat.class);
		
		job.setMapperClass(MapClass.class);
		job.setReducerClass(ReducerClass.class);
		
		int returnValue = job.waitForCompletion(true) ? 0 : 1;
		if(job.isSuccessful()){
			System.out.println("Job was Successful");
		}else{
			System.out.println("Job was fail");
		}
		
		return returnValue;
	}

}
