package com.example.hadoop.mapreduce.flowcount;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.FileOutputStream;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

/**
 * Created with IntelliJ IDEA.
 *
 * @Author: Brian
 * @Date: 2020/04/22 20:34
 * @Description: MapReduce job that sums per-phone up/down flow, partitioned by phone-number prefix.
 */
public class FlowCount {

    /**
     * Routes each mapper output record to a reduce partition based on the
     * first three digits of the phone number (the map output key).
     *
     * The KEY/VALUE type parameters match the mapper's output key/value types.
     * The default partitioner uses {@code key.hashCode() % numPartitions};
     * this override groups records by province prefix instead, so each
     * province's records end up in their own output file.
     */
    static class ProvincePartitioner extends Partitioner<Text, FlowBean> {

        /** Catch-all partition for phone numbers whose prefix is not mapped. */
        private static final int OTHER_PARTITION = 4;

        private static final Map<String, Integer> provinceMap = new HashMap<>();
        static {
            provinceMap.put("136", 0);
            provinceMap.put("137", 1);
            provinceMap.put("138", 2);
            provinceMap.put("139", 3);
        }

        /**
         * Called by the output collector after each map() invocation to decide
         * which partition — and therefore which reduce task — a record goes to.
         *
         * @param text          phone number emitted by the mapper
         * @param flowBean      flow statistics for this record (not used here)
         * @param numPartitions total number of partitions (= number of reduce tasks)
         * @return partition index in the range [0, numPartitions)
         */
        @Override
        public int getPartition(Text text, FlowBean flowBean, int numPartitions) {
            String phone = text.toString();
            // BUGFIX: substring(0, 3) on a key shorter than 3 characters threw
            // StringIndexOutOfBoundsException; malformed keys now fall into the
            // catch-all partition instead of failing the task.
            if (phone.length() < 3) {
                return OTHER_PARTITION;
            }
            // Unmapped prefixes all go to the last partition.
            return provinceMap.getOrDefault(phone.substring(0, 3), OTHER_PARTITION);
        }
    }

    /**
     * Parses one tab-separated log line per call and emits
     * (phoneNumber, FlowBean(upFlow, downFlow)).
     */
    static class FlowCountMapper extends Mapper<LongWritable, Text, Text, FlowBean> {
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            // Split the raw log record into its tab-separated columns.
            String[] columns = value.toString().split("\t");
            // The phone number is the second column.
            String phoneNumber = columns[1];
            // Up-flow and down-flow are the third- and second-to-last columns.
            long up = Long.parseLong(columns[columns.length - 3]);
            long down = Long.parseLong(columns[columns.length - 2]);
            context.write(new Text(phoneNumber), new FlowBean(up, down));
        }
    }

    // Reducer input — all beans for one phone number arrive grouped under that key:
    //   <121313133, Bean1> <121313133, Bean2> <121313133, Bean3> <121313133, Bean4>
    //   <143231232, Bean1> <143231232, Bean2> <143231232, Bean3> <143231232, Bean4>
    //   <123533242, Bean1> <123533242, Bean2> <123533242, Bean3> <123533242, Bean4>
    /** Sums the up/down flow across every record belonging to one phone number. */
    static class FlowCountReducer extends Reducer<Text, FlowBean, Text, FlowBean> {
        @Override
        protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {
            long totalUp = 0;
            long totalDown = 0;
            // Accumulate the totals over all beans grouped under this key.
            for (FlowBean bean : values) {
                totalUp += bean.getUpFlow();
                totalDown += bean.getDFlow();
            }
            context.write(key, new FlowBean(totalUp, totalDown));
        }
    }

    /**
     * Job driver: configures the mapper, reducer, custom partitioner and
     * reduce-task count, clears any stale output directory, submits the job
     * to YARN and exits with 0 on success / 1 on failure.
     *
     * @param args optional: args[0] = input path, args[1] = output path;
     *             defaults are used when fewer than two arguments are given
     */
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // BUGFIX: the original assigned into args[0]/args[1] inside this branch,
        // which is guaranteed to throw NullPointerException (args == null) or
        // ArrayIndexOutOfBoundsException (args.length < 2). Replace the array
        // with the defaults instead of mutating it.
        if (args == null || args.length < 2) {
            args = new String[]{"/wordcount/flowinput", "/wordcount/flowoutput"};
        }
        // Load the default Hadoop configuration.
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        // Lets Hadoop locate the jar containing this class on the cluster.
        job.setJarByClass(FlowCount.class);
        // Wire up the mapper and reducer implementations.
        job.setMapperClass(FlowCountMapper.class);
        job.setReducerClass(FlowCountReducer.class);

        // Map-phase output types.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);

        // Custom partitioner: records are routed by phone-number prefix.
        job.setPartitionerClass(ProvincePartitioner.class);
        // One reduce task per partition (the partitioner produces indices 0..4).
        // Fewer tasks than partitions fails the job; more would only yield
        // empty extra output files.
        job.setNumReduceTasks(5);

        // Final (reducer) output types.
        job.setOutputKeyClass(Text.class);
        // BUGFIX: the original called setOutputKeyClass(FlowBean.class) here,
        // silently overriding the key class and never setting the value class.
        job.setOutputValueClass(FlowBean.class);

        // Delete a pre-existing output directory — Hadoop refuses to overwrite one.
        Path outputPath = new Path(args[1]);
        FileSystem fileSystem = FileSystem.get(conf);
        if (fileSystem.exists(outputPath)) {
            fileSystem.delete(outputPath, true);
        }
        // Configure the input and output paths.
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, outputPath);

        // Submit the job to YARN and block until it finishes.
        boolean successfully = job.waitForCompletion(true);
        System.out.println("Successfully? --> " + successfully);
        System.exit(successfully ? 0 : 1);
    }
}
