package cn.com.mrAdv05;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * MapReduce job driver: configures and submits the job, then exits with the
 * job's success status (0 on success, 1 on failure, 2 on bad usage).
 *
 * <p>Expects two command-line arguments:
 * <ol>
 *   <li>{@code args[0]} — HDFS input path</li>
 *   <li>{@code args[1]} — HDFS output path (must not already exist)</li>
 * </ol>
 */
public class Driver {

	public static void main(String[] args) throws Exception {

		// Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
		if (args.length < 2) {
			System.err.println("Usage: Driver <input path> <output path>");
			System.exit(2);
		}

		// 1. Load the cluster configuration.
		// Note: custom job parameters can also be set on this Configuration.
		Configuration conf = new Configuration();

		// 2. Create the job.
		Job job = Job.getInstance(conf);
		// 3. Set the jar by class so the cluster can locate this job's jar.
		job.setJarByClass(Driver.class);
		// 4. Set the Mapper and Reducer classes.
		// NOTE(review): the reducer is named IntSumReducer but the output value
		// type below is DoubleWritable, and the mapper is TokenizerMapper while
		// the map output key is ExpenseAccountWritable — verify these classes
		// actually match this job's types.
		job.setMapperClass(TokenizerMapper.class);
		job.setReducerClass(IntSumReducer.class);
		// 5. Declare the map-output and final-output key/value types.
		job.setMapOutputKeyClass(ExpenseAccountWritable.class);
		job.setMapOutputValueClass(DoubleWritable.class);
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(DoubleWritable.class);
		// 6. Set the input and output paths.
		FileInputFormat.addInputPath(job, new Path(args[0]));
		FileOutputFormat.setOutputPath(job, new Path(args[1]));

		// 7. Submit the job, wait for completion, and propagate its status.
		System.exit(job.waitForCompletion(true) ? 0 : 1);

	}

}
