package com.bclz.task.flowpartitioner;

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

import com.bclz.config.JobRun;

public class MapReduceExe {

	/**
	 * Maps one tab-separated input line to {@code (phone, FlowObject)}.
	 *
	 * <p>Field layout assumption — NOTE(review): confirm against the actual
	 * input data: the phone number is in column 1, and the up/down flow
	 * counters are read from the tail of the record (indices {@code length-2}
	 * and {@code length-3} respectively, matching the original code; verify
	 * these two are not swapped for your dataset).
	 */
	public static class FlowMap extends Mapper<LongWritable, Text, Text, FlowObject> {

		// Reused across map() calls: Hadoop serializes the key/value into its
		// buffer during context.write(), so per-record allocation is wasteful.
		private final Text outKey = new Text();
		private final FlowObject outValue = new FlowObject();

		@Override
		protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
			String[] fields = value.toString().split("\t");
			// parseLong returns a primitive long, avoiding the boxing that
			// Long.valueOf performed before feeding the long setters.
			outValue.setUpFlow(Long.parseLong(fields[fields.length - 2]));
			outValue.setDownFlow(Long.parseLong(fields[fields.length - 3]));
			outValue.setPhone(fields[1]);
			outKey.set(fields[1]);
			context.write(outKey, outValue);
		}

	}

	/**
	 * Sums the up-flow and down-flow of every record sharing one phone number
	 * and emits a single aggregated {@link FlowObject} per phone.
	 */
	public static class FlowReduce extends Reducer<Text, FlowObject, Text, FlowObject> {

		@Override
		protected void reduce(Text phone, Iterable<FlowObject> flowObjects, Context context)
				throws IOException, InterruptedException {
			// Primitive accumulators: the original boxed Long versions forced
			// an unbox/rebox cycle on every += inside the loop.
			long upFlowTotal = 0L;
			long downFlowTotal = 0L;

			// NOTE(review): Hadoop typically reuses the value instance while
			// iterating, so values are consumed here and never stored.
			for (FlowObject flow : flowObjects) {
				upFlowTotal += flow.getUpFlow();
				downFlowTotal += flow.getDownFlow();
			}
			context.write(phone, new FlowObject(phone.toString(), upFlowTotal, downFlowTotal));
		}

	}
	
	/**
	 * Configures and launches the flow-partitioning job in local mode.
	 *
	 * <p>Input/output paths and the reducer count (6 — presumably matching the
	 * partition count of {@code FlowPartitioner}; confirm) are hard-coded for
	 * a local test run against the local filesystem.
	 */
	public static void main(String[] args) {

		System.setProperty("HADOOP_USER_NAME", "hadoop");
		Configuration conf = new Configuration();
		conf.set("fs.defaultFS", "file:///");
		conf.set(MRConfig.FRAMEWORK_NAME, "local");

		try {
			Job job = Job.getInstance(conf);
			job.setJarByClass(MapReduceExe.class);

			job.setMapperClass(FlowMap.class);
			job.setReducerClass(FlowReduce.class);
			// Set the custom partitioner that routes keys to the 6 reducers below.
			job.setPartitionerClass(FlowPartitioner.class);

			job.setMapOutputKeyClass(Text.class);
			job.setMapOutputValueClass(FlowObject.class);

			job.setOutputKeyClass(Text.class);
			job.setOutputValueClass(FlowObject.class);

			JobRun.runJob(job, "E:\\hadoop_input\\test\\flow", "E:\\hadoop_input\\test\\textflowPartition", 6);

		} catch (IOException e) {
			// Fail loudly: without the non-zero exit the JVM would report
			// success even though the job never ran.
			e.printStackTrace();
			System.exit(1);
		}
	}
	
}
