/**
 * 易码当先 <br>
 * Hadoop-mr <br>
 * cc.ic2c.hadoop.mr.patition <br>
 * FlowCountPartition.java <br>
 * @author Allister.Liu(刘继鹏) <br>
 * Email：laujip@163.com <br>
 * 时间：2017年11月14日-下午8:44:34 <br>
 * 2017Allister-版权所有 <br>
 */
package cc.ic2c.hadoop.mr.patition;

import java.io.IOException;

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

import cc.ic2c.hadoop.mr.FlowBean;

/**
 *   <br>
 * FlowCountPartition <br>
 * @author Allister.Liu(刘继鹏) <br>
 * Email：laujip@163.com <br>
 * 时间：2017年11月14日-下午8:44:34  <br>
 * @version 1.0.0
 * 
 */
public class FlowCountPartition {

	/**
	 * Mapper: parses one tab-separated traffic-log line and emits
	 * {@code (phoneNo, FlowBean(upFlow, dFlow))}.
	 *
	 * <p>Expected record layout (from the visible indexing): the phone number is
	 * field 1, the up-flow is the 3rd-from-last field and the down-flow the
	 * 2nd-from-last. Malformed records are counted and skipped rather than
	 * failing the task.
	 */
	public static class FlowCountPartMapper extends Mapper<LongWritable, Text, Text, FlowBean>{

		// Writables are reused across map() calls to avoid per-record allocation
		// (the framework serializes them on write, so reuse is safe).
		FlowBean bean = new FlowBean();
		private final Text outKey = new Text();

		@Override
		protected void map(LongWritable key, Text value, Context context)
				throws IOException, InterruptedException {

			String line = value.toString();
			try {
				String[] fields = StringUtils.split(line, "\t");

				String phoneNo = fields[1];
				// parseLong avoids the needless boxing of Long.valueOf.
				long upFlow = Long.parseLong(fields[fields.length - 3]);
				long dFlow = Long.parseLong(fields[fields.length - 2]);

				bean.set(phoneNo, upFlow, dFlow);
				outKey.set(phoneNo);

				context.write(outKey, bean);
			} catch (ArrayIndexOutOfBoundsException | NumberFormatException e) {
				// Skip the bad record but keep evidence: a counter visible in the
				// job UI plus the offending line on stderr (task log).
				context.getCounter("FlowCountPartition", "MALFORMED_RECORDS").increment(1);
				System.err.println("Skipping malformed record: " + line + " (" + e + ")");
			}

		}
	}

	/**
	 * Reducer: sums up-flow and down-flow per phone number and emits one
	 * aggregated {@link FlowBean} per key.
	 */
	public static class FlowCountPartReduce extends Reducer<Text, FlowBean, Text, FlowBean>{

		// Reused output value, repopulated for every key.
		FlowBean bean = new FlowBean();

		@Override
		protected void reduce(Text key, Iterable<FlowBean> values, Context context)
				throws IOException, InterruptedException {

			long upFlow = 0;
			long dFlow = 0;
			for (FlowBean flowBean : values) {
				upFlow += flowBean.getUpFlow();
				dFlow += flowBean.getdFlow();
			}

			bean.set(key.toString(), upFlow, dFlow);

			context.write(key, bean);
		}
	}


	/**
	 * Job driver: wires mapper, reducer and the custom {@code AreaPatition}
	 * partitioner, then submits the job.
	 *
	 * @param args {@code args[0]} = input path, {@code args[1]} = output path
	 */
	public static void main(String[] args) throws IllegalArgumentException, IOException, ClassNotFoundException, InterruptedException {
		Configuration conf = new Configuration();
		Job job = Job.getInstance(conf,"flowPartjob");

		job.setJarByClass(FlowCountPartition.class);

		job.setMapperClass(FlowCountPartMapper.class);
		job.setReducerClass(FlowCountPartReduce.class);

		/**
		 * Plug in the custom partitioner: AreaPatition.
		 */
		job.setPartitionerClass(AreaPatition.class);

		/**
		 * Set the number of reduce tasks; it must match the number of partitions
		 * returned by the partitioner:
		 * - more reduce tasks than partitions: the extra reducers produce empty files;
		 * - fewer reduce tasks than partitions: the job fails, because some keys have
		 *   no reduce task to receive them;
		 * - exactly 1 reduce task always works: every key goes to that single reducer.
		 * ("reduce task" / "map task" are the reducer/mapper instances running on
		 * the cluster.)
		 */
		job.setNumReduceTasks(6);

		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(FlowBean.class);

		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(FlowBean.class);

		job.setInputFormatClass(TextInputFormat.class);
		job.setOutputFormatClass(TextOutputFormat.class);

		FileInputFormat.setInputPaths(job, new Path(args[0]));
		FileOutputFormat.setOutputPath(job, new Path(args[1]));

		// Propagate the job result as the process exit code so callers
		// (shell scripts, schedulers) can detect failure.
		System.exit(job.waitForCompletion(true) ? 0 : 1);
	}

}
