package com.jida.hadoop.mr.webuv;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

import com.jida.hadoop.mr.tools.UVCountReducer;
import com.jida.hadoop.mr.tools.UVMapper;



/**
 * MapReduce driver that counts web UV (unique visitors) from the
 * DxFileUser output and writes a single ranked result file.
 *
 * <p>Usage: {@code DxWebUV [genericOptions] [inputPath [outputPath]]}.
 * When no paths are given on the command line, the original hard-coded
 * local test paths are used, so existing invocations keep working.
 *
 * <p>Exits with 0 on job success, 1 on job failure or any exception.
 */
public class DxWebUV {

	/** Default input glob used when no path argument is supplied. */
	private static final String DEFAULT_INPUT = "file:///D:/测试数据/dx/03.DxFileUser/part-r-*";
	/** Default output directory used when no path argument is supplied. */
	private static final String DEFAULT_OUTPUT = "file:///D:/测试数据/dx/04.DxUserRanke";

	public static void main(String[] args) {
		try {
			// Create the job configuration.
			Configuration conf = new Configuration();
			// Map task memory (MB).
			conf.set("mapreduce.map.memory.mb", "5120");
			// Disable the task timeout check: the cluster is unstable and tasks
			// occasionally appear stalled. Safe only because the job has no
			// infinite loops. ("mapred.task.timeout" is the deprecated key name.)
			conf.set("mapreduce.task.timeout", "0");
			// With few datanodes, don't replace a datanode when a write fails.
			conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
			conf.set("dfs.client.block.write.replace-datanode-on-failure.enable", "true");
			//conf.set("mapreduce.reduce.memory.mb", "2048");

			// Parse generic Hadoop options; remaining args are optional paths.
			String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
			String inputPath = otherArgs.length > 0 ? otherArgs[0] : DEFAULT_INPUT;
			String outputPath = otherArgs.length > 1 ? otherArgs[1] : DEFAULT_OUTPUT;

			// Create the job (Job.getInstance replaces the deprecated constructor).
			Job job = Job.getInstance(conf, "DxWebUV");
			// Required so the job runs correctly when packaged as a jar.
			job.setJarByClass(DxWebUV.class);

			// Input paths and mapper/reducer wiring.
			FileInputFormat.setInputPaths(job, inputPath);
			job.setMapperClass(UVMapper.class);
			job.setReducerClass(UVCountReducer.class);
			job.setOutputKeyClass(Text.class);
			job.setOutputValueClass(Text.class);
			// Single reducer so the ranking lands in one output file.
			job.setNumReduceTasks(1);
			FileOutputFormat.setOutputPath(job, new Path(outputPath));

			// Submit and wait; exit 0 on success, 1 on failure.
			System.exit(job.waitForCompletion(true) ? 0 : 1);
		} catch (Exception e) {
			e.printStackTrace();
			// Signal failure to the caller; previously the JVM exited 0 here.
			System.exit(1);
		}
	}
}