package com.knight.hadoop.day08.flow.sort;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Aggregates the up/down traffic totals from log data and outputs the
 * result sorted by total traffic in descending order.
 *
 * <p>Implementation note: the phone-number-keyed output of the previous
 * job is used as input here, and the traffic total ({@code FlowModel})
 * becomes the map output key so the shuffle phase sorts by it.
 */
public class FlowDemoSort {

	static class FlowSortMapper extends Mapper<LongWritable, Text, FlowModel, Text> {

		// Reused across map() calls to avoid per-record allocation (standard
		// Hadoop writable-reuse pattern). Instance fields rather than static:
		// each map task gets its own Mapper instance, and static mutable
		// state would be incorrectly shared if tasks ever ran in one JVM.
		private final FlowModel flowModel = new FlowModel();
		private final Text text = new Text();

		/**
		 * Parses one line of the previous job's output, expected in the form
		 * {@code phone \t xxx=upFlow \t xxx=dwFlow}, and emits
		 * {@code (FlowModel(up, dw), phone)} so the shuffle phase sorts the
		 * records by the FlowModel key (i.e. by total traffic).
		 *
		 * @param key     byte offset of the line in the input split (unused)
		 * @param value   one raw input line
		 * @param context Hadoop task context used to emit the key/value pair
		 */
		@Override
		protected void map(LongWritable key, Text value, Context context)
				throws IOException, InterruptedException {
			String line = value.toString();
			String[] fields = line.split("\t");

			// Guard against malformed/short lines: skip them instead of
			// letting an ArrayIndexOutOfBoundsException kill the whole task.
			if (fields.length < 3) {
				return;
			}

			String telNumb = fields[0]; // phone number
			// Primitive long avoids needless boxing of the parsed values.
			long upFlow = Long.parseLong(getFlow(fields[1])); // upstream traffic
			long dwFlow = Long.parseLong(getFlow(fields[2])); // downstream traffic

			flowModel.set(upFlow, dwFlow);
			text.set(telNumb);

			context.write(flowModel, text);
		}

		/** Extracts the value part of a {@code name=value} token. */
		private static String getFlow(String includeeqMarkFlow) {
			return includeeqMarkFlow.split("=")[1];
		}
	}


	static class FlowSortReducer extends Reducer<FlowModel, Text, Text, FlowModel> {

		/**
		 * Emits {@code (phone, FlowModel)} pairs in the key order produced by
		 * the shuffle, i.e. sorted by total traffic.
		 *
		 * <p>Bug fix: iterate over ALL phone numbers grouped under the same
		 * flow total. The original wrote only {@code value.iterator().next()},
		 * silently dropping every other phone whose totals compared equal.
		 */
		@Override
		protected void reduce(FlowModel key, Iterable<Text> values,
				Context context) throws IOException, InterruptedException {
			for (Text phone : values) {
				context.write(phone, key);
			}
		}
	}

	/**
	 * Configures and submits the sort job: sets the cluster endpoints, the
	 * Mapper/Reducer classes and their key/value types, wires the input and
	 * output paths from the command line, and waits for completion.
	 *
	 * @param args args[0] = input directory, args[1] = output directory
	 */
	public static void main(String[] args) throws Exception {
		Configuration configuration = new Configuration();
		/*
		 * If you hit "java.io.IOException: No FileSystem for scheme: hdfs",
		 * add:
		 * configuration.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
		 */
		// HDFS / YARN runtime environment.
		configuration.set("fs.defaultFS", "hdfs://hadoop4:9000");
		configuration.set("mapreduce.framework.name", "yarn");
		configuration.set("yarn.resourcemanager.hostname", "hadoop4");

		Job job = Job.getInstance(configuration);

		// Local path of the jar containing this program.
		// NOTE(review): hard-coded developer-machine path — prefer
		// job.setJarByClass(FlowDemoSort.class) when running from the jar itself.
		job.setJar("C:/Users/YQ/Desktop/sort.jar");
		//job.setJarByClass(FlowDemoSort.class);

		// Mapper task class.
		job.setMapperClass(FlowSortMapper.class);
		// Reducer task class.
		job.setReducerClass(FlowSortReducer.class);

		// Custom partitioner / reduce-task count (disabled for this job).
		//job.setPartitionerClass(ProvincePartitioner.class);
		//job.setNumReduceTasks(5);


		// Mapper output key/value types.
		job.setMapOutputKeyClass(FlowModel.class);
		job.setMapOutputValueClass(Text.class);

		// Final (reducer) output key/value types.
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(FlowModel.class);

		// Input directory.
		FileInputFormat.setInputPaths(job, new Path(args[0]));
		// Output directory.
		FileOutputFormat.setOutputPath(job, new Path(args[1]));

		// job.submit() would fire-and-forget; waitForCompletion(true) blocks
		// until the job finishes and prints progress, so we can exit with a
		// meaningful status code.
		boolean res = job.waitForCompletion(true);
		System.exit(res ? 0 : 1);
	}
}
