package com.mapreduce;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Driver that submits our word-count MapReduce program to the YARN cluster.
 *
 * @author Administrator
 */
public class WordCountRunner {

	// Defaults preserved from the original hard-coded pseudo-cluster layout;
	// can be overridden via command-line arguments.
	private static final String DEFAULT_INPUT = "hdfs://127.0.0.1:9000/wordcount/srcdata/";
	private static final String DEFAULT_OUTPUT = "hdfs://127.0.0.1:9000/wordcount/output";

	/**
	 * The MapReduce framework has gone through two generations of APIs; the old
	 * API generally abbreviates "mapreduce" to "mapred" in package names.
	 * Submitting the job directly from {@code main} is the old-API style; with
	 * the new API, job submission should go through {@code ToolRunner#run}.
	 *
	 * @param args optional overrides: {@code args[0]} = input path,
	 *             {@code args[1]} = output path; the original hard-coded HDFS
	 *             locations are used when absent, so existing invocations with
	 *             no arguments behave exactly as before
	 * @throws IOException            if job submission fails
	 * @throws InterruptedException   if the wait for completion is interrupted
	 * @throws ClassNotFoundException if a job class cannot be resolved
	 */
	public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
		Configuration conf = new Configuration();
		// Job is the submission-side object that packages up the information
		// describing our business program.
		Job job = Job.getInstance(conf);
		// Record which jar holds our program by naming a class inside it.
		job.setJarByClass(WordCountRunner.class);
		// Mapper and reducer implementations used by this job.
		job.setMapperClass(WordCountMapper.class);
		job.setReducerClass(WordCountReduce.class);
		// Key/value types emitted by the mapper.
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(LongWritable.class);
		// Key/value types emitted by the reducer (the job's final output).
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(LongWritable.class);
		// Where the files to process live, and where results are written.
		// Allow overriding on the command line so the job is not tied to one
		// hard-coded cluster layout.
		String inputPath = args.length > 0 ? args[0] : DEFAULT_INPUT;
		String outputPath = args.length > 1 ? args[1] : DEFAULT_OUTPUT;
		FileInputFormat.setInputPaths(job, new Path(inputPath));
		FileOutputFormat.setOutputPath(job, new Path(outputPath));

		// Block until the job finishes; exit non-zero on failure so scripts
		// can detect it.
		System.exit(job.waitForCompletion(true) ? 0 : 1);
	}
}
