package com.honey.mapreduce.flowcount;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

//mapreduce主程序
public class FlowCount {

    /**
     * map阶段
     */
    /**
     * Map phase: parses one tab-separated input line and emits
     * (phone number, FlowBean(max, min)).
     *
     * Expected line layout (from the indexing below): the phone number is the
     * second field, and the two flow values are the third- and second-from-last
     * fields. Lines that do not match will throw at runtime, failing the task.
     */
    static class FlowCountMapper extends Mapper<LongWritable, Text, Text, FlowBean> {

        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            // 1. Convert the raw line to a String.
            String line = value.toString();
            // 2. Split into tab-separated fields.
            String[] fields = line.split("\t");
            // 3. The phone number is the second field.
            String phone = fields[1];
            // 4. Extract the two flow values. Use long primitives rather than
            //    boxed Long — the original boxed on every record for no benefit.
            long max = Long.parseLong(fields[fields.length - 3]); // third from the end
            long min = Long.parseLong(fields[fields.length - 2]); // second from the end
            // 5. Emit the record keyed by phone number.
            context.write(new Text(phone), new FlowBean(max, min));
        }
    }

    /**
     * reduce阶段
     */
    /**
     * Reduce phase: sums the max and min flow values of all FlowBeans sharing
     * the same phone-number key and emits a single aggregated FlowBean.
     */
    static class FlowCountReducer extends Reducer<Text, FlowBean, Text, FlowBean> {
        @Override
        protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {
            // Primitive accumulators: the original used boxed Long, which
            // unboxed and re-boxed on every iteration of this hot loop
            // (accidental autoboxing — Effective Java Item 61).
            long sumMax = 0L;
            long sumMin = 0L;

            // Accumulate the max and min flow over all values for this key.
            for (FlowBean bean : values) {
                sumMax += bean.getMax();
                sumMin += bean.getMin();
            }
            context.write(key, new FlowBean(sumMax, sumMin));
        }
    }

    /**
     * Job driver: configures and submits the flow-count MapReduce job.
     *
     * @param args args[0] = input path, args[1] = output path
     * @throws IOException            on filesystem or job-submission failure
     * @throws ClassNotFoundException if a job class cannot be resolved
     * @throws InterruptedException   if the job wait is interrupted
     */
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // Fail fast with a usage message instead of an
        // ArrayIndexOutOfBoundsException on missing arguments.
        if (args.length < 2) {
            System.err.println("Usage: FlowCount <input path> <output path>");
            System.exit(2);
        }

        // 1. Build the configuration and the job instance.
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        // 2. Locate the jar containing this driver class.
        job.setJarByClass(FlowCount.class);

        // 3. Custom partitioner (groups records by province).
        job.setPartitionerClass(ProvincePartitioner.class);
        /*
         * Interaction between reduce-task count and getPartition():
         * - reduceTasks > partition count: extra empty part-r-000xx files;
         * - 1 < reduceTasks < partition count: some partitions have no home -> Exception;
         * - reduceTasks == 1: all map output goes to the single reducer,
         *   producing one result file, part-r-00000.
         */
        // 4. Number of reduce tasks (must be consistent with the partitioner).
        job.setNumReduceTasks(1);

        // 5. Mapper and reducer classes for this job.
        job.setMapperClass(FlowCountMapper.class);
        job.setReducerClass(FlowCountReducer.class);

        // 6. Map-output key/value types.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);

        // 7. Final (reduce) output key/value types.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);

        // 8. Input and output paths. Delete a pre-existing output directory,
        //    since Hadoop refuses to overwrite one.
        FileInputFormat.setInputPaths(job, new Path(args[0]));

        Path outPath = new Path(args[1]);
        FileSystem fs = FileSystem.get(conf);
        if (fs.exists(outPath)) {
            fs.delete(outPath, true); // true = recursive
        }
        FileOutputFormat.setOutputPath(job, outPath); // reuse outPath (was re-parsed from args[1])

        // 9. Submit the job and block until completion; exit 0 on success.
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
