package com.dzx;

/**
 * @author DuanZhaoXu
 * @ClassName:
 * @Description:
 * @date 2018年12月04日 16:04:16
 */

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

/**
 * 使用mapReduce  开发PartitionerApp应用程序，将不同类型的手机 销售情况输出到 不同的part-0000 输出文件中
 * xiaomi 200
 * huawei 300
 * xiaomi 100
 * huawei 200
 * iphone7 300
 * iphone7 500
 * nokia 20
 */
public class PartitionerApp {
    public static class MyMapper extends Mapper<LongWritable, Text, Text, LongWritable> {

        /**
         * Parses one input line of the form {@code "<brand> <count>"} and emits
         * (brand, count) for the shuffle phase.
         *
         * @param key     byte offset of the line within the input split
         * @param value   the raw line text
         * @param context output collector for the map phase
         * @throws IOException          if the write to the context fails
         * @throws InterruptedException if the task is interrupted
         */
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            // Split on any run of whitespace so doubled or trailing spaces
            // don't produce empty tokens (split(" ") would).
            String[] fields = value.toString().trim().split("\\s+");
            // Skip blank or malformed lines instead of throwing
            // ArrayIndexOutOfBoundsException and failing the whole task.
            if (fields.length < 2) {
                return;
            }
            try {
                context.write(new Text(fields[0]), new LongWritable(Long.parseLong(fields[1])));
            } catch (NumberFormatException ignored) {
                // Non-numeric count: drop this record rather than crash the job.
            }
        }
    }

    public static class MyReducer extends Reducer<Text, LongWritable, Text, LongWritable> {

        /**
         * Sums every count seen for one brand and emits (brand, total).
         * Also reused as the combiner, which is safe because summation is
         * associative and commutative.
         *
         * @param key     the brand name
         * @param values  all counts grouped under this brand
         * @param context output collector for the reduce phase
         * @throws IOException          if the write to the context fails
         * @throws InterruptedException if the task is interrupted
         */
        @Override
        protected void reduce(Text key, Iterable<LongWritable> values, Context context) throws IOException, InterruptedException {
            long total = 0L;
            java.util.Iterator<LongWritable> it = values.iterator();
            while (it.hasNext()) {
                total += it.next().get();
            }
            context.write(key, new LongWritable(total));
        }
    }


    public static class MyPartitioner extends Partitioner<Text, LongWritable> {

        /**
         * Routes each brand to a dedicated reduce partition:
         * xiaomi -> 0, huawei -> 1, iphone7 -> 2, anything else -> 3.
         * The job must therefore run with exactly 4 reduce tasks.
         *
         * @param text          the brand key
         * @param longWritable  the sales count (not used for routing)
         * @param i             total number of partitions (not used; mapping is fixed)
         * @return partition index in [0, 3]
         */
        @Override
        public int getPartition(Text text, LongWritable longWritable, int i) {
            switch (text.toString()) {
                case "xiaomi":
                    return 0;
                case "huawei":
                    return 1;
                case "iphone7":
                    return 2;
                default:
                    return 3;
            }
        }
    }


    /**
     * Driver: packages all the MapReduce job configuration and submits it.
     *
     * @param args args[0] = input file path, args[1] = output directory path
     * @throws Exception on any HDFS access or job-submission failure
     */
    public static void main(String[] args) throws Exception {
        // Fail fast with a usage message instead of an
        // ArrayIndexOutOfBoundsException when paths are missing.
        if (args.length < 2) {
            System.err.println("Usage: PartitionerApp <input path> <output path>");
            System.exit(2);
        }
        Configuration configuration = new Configuration();
        // Re-running the job fails if the output directory already exists,
        // so delete a stale one up front.
        FileSystem fileSystem = FileSystem.newInstance(configuration);
        try {
            Path outPath = new Path(args[1]);
            if (fileSystem.exists(outPath)) {
                System.out.println("删除已经存在的 " + args[1] + "文件");
                fileSystem.delete(outPath, true);
            }
        } finally {
            // newInstance() returns an uncached FileSystem the caller owns;
            // close it to avoid leaking the connection.
            fileSystem.close();
        }
        // Create the job, named after this driver (was a copy-pasted "wordcount",
        // which showed up misleadingly in the job tracker UI).
        Job job = Job.getInstance(configuration, "PartitionerApp");
        // Jar containing the map/reduce/partition classes.
        job.setJarByClass(PartitionerApp.class);
        // Input path for the job.
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        // Mapper configuration.
        job.setMapperClass(MyMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);
        // Combiner: a map-side local reduce that shrinks shuffle traffic.
        job.setCombinerClass(MyReducer.class);
        // Reducer configuration.
        job.setReducerClass(MyReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        // Custom partitioner routing each brand to its own reducer.
        job.setPartitionerClass(MyPartitioner.class);
        // Must match the 4 partitions (0..3) that MyPartitioner can return.
        job.setNumReduceTasks(4);
        // Output path for the job.
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
