package com.leadbank.bigdata.mapreduce.mobileflow;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.io.IOException;
import java.net.URI;

/**
 * MapReduce job that aggregates mobile HTTP traffic KPIs — upstream/downstream
 * packet counts and byte totals — per phone number (MSISDN).
 *
 * Created by hp on 2018/5/9.
 */
public class MyKpiJob extends Configured implements Tool {

    /**
     * Mapper: parses one tab-separated traffic log line into
     * (phone number, KpiWritable).
     *
     * Expected record layout: field 1 is the MSISDN (phone number),
     * fields 6-9 are upPackNum, downPackNum, upPayLoad, downPayLoad.
     */
    public static class MyMapper extends Mapper<LongWritable, Text, Text, KpiWritable> {

        // A usable record needs at least 10 tab-separated fields (indices 0..9).
        private static final int MIN_FIELDS = 10;

        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            String[] splited = value.toString().split("\t");
            // Skip malformed/short lines instead of killing the whole task
            // with an ArrayIndexOutOfBoundsException.
            if (splited.length < MIN_FIELDS) {
                return;
            }
            // The phone number becomes the map output key.
            Text k2 = new Text(splited[1]);
            // The four traffic counters become the map output value.
            KpiWritable v2 = new KpiWritable(splited[6], splited[7], splited[8], splited[9]);
            context.write(k2, v2);
        }
    }

    /**
     * Reducer: sums all KPI records observed for one phone number and emits a
     * single aggregated KpiWritable per key.
     */
    public static class MyReducer extends Reducer<Text, KpiWritable, Text, KpiWritable> {
        @Override
        protected void reduce(Text key, Iterable<KpiWritable> values, Context context) throws IOException, InterruptedException {
            long totalUpPackets = 0L;   // upstream packets, count
            long totalDownPackets = 0L; // downstream packets, count
            long totalUpBytes = 0L;     // upstream traffic, bytes
            long totalDownBytes = 0L;   // downstream traffic, bytes

            for (KpiWritable record : values) {
                totalUpPackets += record.getUpPackNum();
                totalDownPackets += record.getDownPackNum();
                totalUpBytes += record.getUpPayLoad();
                totalDownBytes += record.getDownPayLoad();
            }

            context.write(key, new KpiWritable(totalUpPackets, totalDownPackets, totalUpBytes, totalDownBytes));
        }
    }

    /**
     * Partitioner: routes keys that look like standard 11-digit mobile numbers
     * to reduce task 0 and everything else to reduce task 1.
     */
    public static class KpiPartitioner extends Partitioner<Text, KpiWritable> {

        @Override
        public int getPartition(Text text, KpiWritable kpiWritable, int numPartitions) {
            // Guard: with fewer than two reduce tasks every key must map to
            // partition 0 — returning 1 would make the framework fail with
            // an "Illegal partition" error.
            if (numPartitions < 2) {
                return 0;
            }
            // 11 characters == a standard mobile number; others go to partition 1.
            return text.getLength() == 11 ? 0 : 1;
        }
    }

    // HDFS input file: one HTTP traffic record per line, tab-separated.
    public static final String INPUT_PATH = "hdfs://master:8020/tmp/mobileflow/input/HTTP_20130313143750.dat";
    // HDFS output directory; deleted and recreated on every run.
    public static final String OUTPUT_PATH = "hdfs://master:8020/tmp/mobileflow/output/mobilelog";

    /**
     * Configures and submits the MapReduce job, waiting for completion.
     *
     * @param args unused; input/output locations come from the class constants
     * @return 0 if the job succeeded, 1 if it failed
     * @throws Exception on HDFS access or job submission errors
     */
    @Override
    public int run(String[] args) throws Exception {
        // Remove a stale output directory so the job does not abort on startup.
        FileSystem fileSystem = FileSystem.get(new URI(INPUT_PATH), getConf());
        Path outPath = new Path(OUTPUT_PATH);
        if (fileSystem.exists(outPath)) {
            fileSystem.delete(outPath, true);
        }

        Job wcjob = Job.getInstance(getConf(), "MyKpiJob");
        // Locate the jar containing this job's classes.
        wcjob.setJarByClass(MyKpiJob.class);

        wcjob.setMapperClass(MyMapper.class);
        wcjob.setReducerClass(MyReducer.class);
        wcjob.setPartitionerClass(KpiPartitioner.class);
        // KpiPartitioner spreads keys over two partitions, so two reduce tasks
        // are required; with the default single reducer the job would fail
        // with an "Illegal partition" error.
        wcjob.setNumReduceTasks(2);

        // Map output key/value types.
        wcjob.setMapOutputKeyClass(Text.class);
        wcjob.setMapOutputValueClass(KpiWritable.class);
        // Final (reduce) output key/value types.
        wcjob.setOutputKeyClass(Text.class);
        wcjob.setOutputValueClass(KpiWritable.class);

        // Where the data lives and where the results go.
        FileInputFormat.setInputPaths(wcjob, INPUT_PATH);
        FileOutputFormat.setOutputPath(wcjob, outPath);

        // Submit to the cluster and block until done. Return the status
        // instead of calling System.exit() here — exiting inside run()
        // defeats the Tool/ToolRunner contract and made the trailing
        // "return 0" unreachable in practice.
        return wcjob.waitForCompletion(true) ? 0 : 1;
    }

    /**
     * Entry point: runs the job through ToolRunner (so generic Hadoop options
     * like -D and -conf are parsed) and exits with the job's status code.
     */
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        try {
            System.exit(ToolRunner.run(conf, new MyKpiJob(), args));
        } catch (Exception e) {
            // Exit non-zero so callers/schedulers can detect the failure
            // (previously the JVM exited with status 0 after the stack trace).
            e.printStackTrace();
            System.exit(1);
        }
    }
}
