package job03;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class job03 {
	/**
	 * Driver for the "Dx_CountPv" MapReduce job: consumes the part files
	 * produced by the preceding file-match job (job02) and counts page views
	 * using {@code CountPvMapper} / {@code CountPvReducer}.
	 *
	 * @param args optional generic Hadoop options followed by
	 *             {@code [inputPath, outputPath]}; when fewer than two
	 *             remaining arguments are given, the hard-coded local test
	 *             paths below are used as a fallback.
	 */
	public static void main(String[] args) {
		try {
			// Job configuration.
			Configuration conf = new Configuration();
			// Raise per-map-task container memory to 5 GB.
			conf.set("mapreduce.map.memory.mb", "5120");
			// Parse generic Hadoop options from the command line; fall back to
			// the local test paths when no input/output arguments are supplied
			// (preserves the previous no-args behavior).
			String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
			if (otherArgs.length < 2) {
				otherArgs = new String[] {
						"F://sparkData//test//02.DxFileMatch/part-m-00*",
						"F://sparkData//test///03.DxCountPv"};
			}
			// Create the job (Job.getInstance replaces the deprecated Job(conf, name) ctor).
			Job job = Job.getInstance(conf, "Dx_CountPv");
			// Required so the jar containing this class is shipped to the cluster.
			job.setJarByClass(job03.class);
			// Input: glob over the previous job's map output files.
			FileInputFormat.setInputPaths(job, otherArgs[0]);
			// Mapper and its intermediate key/value types.
			job.setMapperClass(CountPvMapper.class);
			job.setMapOutputKeyClass(Text.class);
			job.setMapOutputValueClass(IntWritable.class);
			// Reducer and final output key/value types. No combiner: the reducer
			// emits (NullWritable, Text), which does not match the map output
			// types (Text, IntWritable), so it cannot legally act as a combiner.
			job.setReducerClass(CountPvReducer.class);
			job.setOutputKeyClass(NullWritable.class);
			job.setOutputValueClass(Text.class);
			FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
			// Submit and wait; exit 0 on success, 1 on job failure.
			System.exit(job.waitForCompletion(true) ? 0 : 1);
		} catch (Exception e) {
			// Previously the stack trace was printed but the JVM still exited
			// with status 0, hiding failures from callers/schedulers.
			e.printStackTrace();
			System.exit(1);
		}
	}
}
