package flow;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;
import java.net.URI;

/**
 * Weekend assignment.
 *   A file flow.txt records per-phone traffic usage:
 *    13480253104 180 180 360
 *    13502468823 7335 110349 117684
 *    13560436666 3597 25635 29232
 *    13560439658 2034 5892 7926
 *
 * Write an MR program that achieves the following:
 *    1. The mapper's input key type is FlowBean and the value is NullWritable,
 *       driven by a custom InputFormat.
 *    2. Sort the data — sorting rule: descending by total flow; ties broken by
 *       ascending upstream flow; if both total and upstream flow tie, compare
 *       by downstream flow.
 *    3. Write the results into three files — partitioning rule:
 *         total flow &gt; 100 and &lt; 5000   → one file
 *         total flow &gt; 5000 and &lt; 10000 → one file
 *         total flow &gt; 10000             → one file
 */
/**
 * Driver for the flow-sorting job: wires the custom InputFormat, an identity
 * mapper/reducer pair keyed by {@code Flow} (whose compareTo supplies the sort
 * order), and a {@code FlowPartitioner} that splits output by total flow.
 */
public class Work {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS","hdfs://192.168.0.40:9000");

        Job job = Job.getInstance(conf);
        job.setJarByClass(Work.class);

        // Wire up custom InputFormat -> mapper -> partitioner -> reducer.
        job.setInputFormatClass(MyInputFormat.class);

        job.setMapperClass(WCMapper.class);
        job.setMapOutputKeyClass(Flow.class);
        job.setMapOutputValueClass(NullWritable.class);

        // NOTE(review): 4 reduce tasks, but the spec asks for 3 output files —
        // confirm FlowPartitioner really defines 4 partitions (e.g. one extra
        // bucket for total flow <= 100), otherwise one output file stays empty.
        job.setPartitionerClass(FlowPartitioner.class);
        job.setNumReduceTasks(4);

        job.setReducerClass(WCReducer.class);
        job.setOutputKeyClass(Flow.class);
        job.setOutputValueClass(NullWritable.class);

        // Input is the part-r-00000 produced by an earlier job (same content as flow.txt).
        FileInputFormat.setInputPaths(job,"/phone1/part-r-00000");

        // Connect to HDFS and clear a stale output directory, if one exists,
        // since the job fails when the output path is already present.
        Path outputDir = new Path("/output2");
        FileSystem hdfs = FileSystem.get(new URI("hdfs://192.168.0.40:9000"),conf,"root");
        if (hdfs.exists(outputDir)) {
            hdfs.delete(outputDir,true);
        }
        FileOutputFormat.setOutputPath(job,outputDir);

        // Submit the job and block until completion; exit code reflects success.
        boolean succeeded = job.waitForCompletion(true);
        System.exit(succeeded ? 0 : 1);
    }
}
/**
 * Identity mapper: records arrive already parsed into {@code Flow} keys by the
 * custom InputFormat, so each one is forwarded unchanged and the framework's
 * shuffle performs the sorting and partitioning.
 */
class WCMapper extends Mapper<Flow,NullWritable,Flow,NullWritable> {
    @Override
    protected void map(Flow flow, NullWritable ignored, Context context)
            throws IOException, InterruptedException {
        context.write(flow, NullWritable.get());
    }
}
/**
 * Identity reducer that emits one output line per input record.
 *
 * <p>Fix: the original wrote each key exactly once, so two distinct phone
 * records whose {@code Flow} keys compare equal (same totals) are grouped into
 * a single reduce call and would collapse to one output line. Iterating the
 * value list and writing the key once per value preserves every record —
 * Hadoop refills the reused key object on each iterator step.
 */
class WCReducer extends Reducer<Flow, NullWritable,Flow,NullWritable>{
    @Override
    protected void reduce(Flow key, Iterable<NullWritable> values, Context context)
            throws IOException, InterruptedException {
        for (NullWritable ignored : values) {
            context.write(key, NullWritable.get());
        }
    }
}
