package com.hadoop.demo.mapreduce.flexcount;

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

/**
 * =================================
 * <p>
 * Created by cjj on 18-10-10.
 * <p>
 * Description: counts words across multiple input files and, for each word,
 * reports the per-file counts it came from (a chained two-job MapReduce demo).
 */

public class FlexCountRunner {

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {

        Configuration conf = new Configuration();

        // ---- Job 1: count occurrences of each "word-->file" pair ----
        Job job1 = Job.getInstance(conf);

        // Ship the jar containing all of this job's classes to the cluster.
        job1.setJarByClass(FlexCountRunner.class);

        job1.setMapperClass(StepOneMapper.class);
        job1.setReducerClass(StepOneReduce.class);

        // Mapper output types.
        job1.setMapOutputKeyClass(Text.class);
        job1.setMapOutputValueClass(LongWritable.class);

        // Reducer output types.
        job1.setOutputKeyClass(Text.class);
        job1.setOutputValueClass(LongWritable.class);

        // Change these to local paths to run outside the cluster.
        FileInputFormat.setInputPaths(job1, new Path("/home/cjj/temp/input/"));

        // Job 1's output doubles as job 2's input; remove any stale run first
        // (Hadoop refuses to write into an existing output directory).
        Path step1Output = new Path("/home/cjj/temp/output/");
        deleteIfExists(conf, step1Output);
        FileOutputFormat.setOutputPath(job1, step1Output);

        // Run job 1; exit non-zero on failure so schedulers and shell scripts
        // can detect it (previously the driver always exited 0).
        if (!job1.waitForCompletion(true)) {
            System.exit(1);
        }

        // ---- Job 2: regroup the per-file counts under each word ----
        Job job2 = Job.getInstance(conf);
        job2.setJarByClass(FlexCountRunner.class);

        job2.setMapperClass(StepTwoMapper.class);
        job2.setReducerClass(StepTwoReduce.class);

        // Mapper output types.
        job2.setMapOutputKeyClass(Text.class);
        job2.setMapOutputValueClass(Text.class);
        // Reducer output types.
        job2.setOutputKeyClass(Text.class);
        job2.setOutputValueClass(Text.class);

        // Custom partitioner splits keys into two buckets by word length; the
        // reducer count must match the bucket count (more reducers produce
        // empty files, fewer fail at runtime).
        job2.setPartitionerClass(FlexPartitioner.class);
        job2.setNumReduceTasks(2);

        FileInputFormat.setInputPaths(job2, step1Output);

        // Final output directory; cleared the same way as job 1's.
        Path step2Output = new Path("/home/cjj/temp/output2/");
        deleteIfExists(conf, step2Output);
        FileOutputFormat.setOutputPath(job2, step2Output);

        // Submit job 2 and report its success/failure via the exit status.
        System.exit(job2.waitForCompletion(true) ? 0 : 1);
    }

    /**
     * Recursively deletes {@code path} if it exists so a job can write a
     * fresh output directory.
     *
     * @param conf configuration used to resolve the target file system
     * @param path output directory to remove
     * @throws IOException if the file system cannot be reached
     */
    private static void deleteIfExists(Configuration conf, Path path) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        if (fs.exists(path)) {
            fs.delete(path, true);
        }
    }

    /**
     * Step 1 mapper: for every word on a line, emits {@code word-->filename}
     * with a count of 1, tagging each word with the file it came from so the
     * final result can be broken down per file.
     */
    public static class StepOneMapper extends Mapper<LongWritable, Text, Text, LongWritable> {

        // Reused across map() calls — avoids allocating a new Writable per
        // output record, the standard Hadoop object-reuse idiom.
        private static final LongWritable ONE = new LongWritable(1);
        private final Text outKey = new Text();

        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            // Name of the file this split belongs to, used to tag each word.
            FileSplit fileSplit = (FileSplit) context.getInputSplit();
            String name = fileSplit.getPath().getName();

            // commons-lang split skips empty tokens, so repeated spaces and
            // blank lines are handled safely.
            String[] fields = StringUtils.split(value.toString(), " ");
            for (String field : fields) {
                outKey.set(field + "-->" + name);
                context.write(outKey, ONE);
            }
        }
    }

    /**
     * Step 1 reducer: sums the per-record counts for each
     * {@code word-->file} key.
     */
    public static class StepOneReduce extends Reducer<Text, LongWritable, Text, LongWritable> {

        @Override
        protected void reduce(Text key, Iterable<LongWritable> counts, Context context)
                throws IOException, InterruptedException {
            long total = 0L;
            for (LongWritable count : counts) {
                total += count.get();
            }
            context.write(key, new LongWritable(total));
        }
    }


    /**
     * Step 2 mapper: re-keys step 1's output lines
     * ({@code word-->file<TAB>count}) by the word alone, emitting
     * {@code file-->count } as the value so the reducer can concatenate every
     * file's count for that word.
     */
    public static class StepTwoMapper extends Mapper<LongWritable, Text, Text, Text> {

        // Reused output Writables (standard Hadoop object-reuse idiom).
        private final Text outKey = new Text();
        private final Text outValue = new Text();

        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            String line = value.toString();

            // Expect "word-->file<TAB>count"; skip blank or malformed lines
            // instead of killing the task with ArrayIndexOutOfBoundsException.
            String[] fields = StringUtils.split(line, "\t");
            if (fields.length < 2) {
                return;
            }
            String[] split = StringUtils.split(fields[0], "-->");
            if (split.length < 2) {
                return;
            }

            outKey.set(split[0]);
            outValue.set(split[1] + "-->" + fields[1] + " ");
            context.write(outKey, outValue);
        }
    }

    /**
     * Step 2 reducer: concatenates every {@code file-->count } fragment for a
     * word into one summary line.
     */
    public static class StepTwoReduce extends Reducer<Text, Text, Text, Text> {

        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
            // StringBuilder avoids the O(n^2) copying of repeated String +=
            // when a word appears in many files.
            StringBuilder summary = new StringBuilder();
            for (Text text : values) {
                summary.append(text.toString());
            }
            context.write(key, new Text(summary.toString()));
        }
    }


    public static class FlexPartitioner<KEY,VALUE> extends Partitioner<KEY,VALUE>{

        public int getPartition(KEY key, VALUE value, int numPartitions) {
            //根据单词的长度分成两个组 高于五个单词和低于五个单词
            if(key.toString().length()>5){
                return 1;
            }
            return 0;
        }
    }


}
