package com.lx.counter;

import com.lx.partitionsum.CovidPartitionDriver;
import com.lx.partitionsum.CovidPartitionMapper;
import com.lx.partitionsum.CovidPartitionReducer;
import com.lx.partitionsum.SatatePartition;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.io.IOException;

/**
 * MapReduce driver for a word-count job that additionally uses a global
 * Hadoop counter (group "lanyxp_counters") to track how many times the
 * word "hadoop" appears in the input.
 *
 * @Author: chenjiang
 * @Date: 2021/11/22/11:50
 * @Description: word count with a custom global counter
 */
public class CounterDriver extends Configured implements Tool {
    /** The word whose occurrences are tallied in a global Hadoop counter. */
    private static final String incr = "hadoop";

    /**
     * Entry point: runs the job through {@link ToolRunner} and exits with the
     * job's status code.
     *
     * @param args args[0] = input path, args[1] = output path
     */
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // FIX: default to failure; previously an exception left status at 0 and
        // the process exited "successfully" after silently swallowing the error.
        int status = 1;
        try {
            status = ToolRunner.run(conf, new CounterDriver(), args);
        } catch (Exception e) {
            e.printStackTrace();
        }
        System.exit(status);
    }

    /**
     * Configures and submits the word-count job.
     *
     * @param args args[0] = input path, args[1] = output path
     * @return 0 on success, 1 on failure
     * @throws Exception if job setup or submission fails
     */
    @Override
    public int run(String[] args) throws Exception {
        // FIX: use the configuration injected by ToolRunner instead of creating a
        // fresh one, so -D options passed on the command line take effect.
        final Configuration conf = getConf();
        Job job = Job.getInstance(conf, CounterDriver.class.getSimpleName());
        job.setJarByClass(CounterDriver.class);
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);
        // FIX: map output key/value classes were swapped relative to the
        // WordCountMapper generics (key = Text, value = LongWritable); the job
        // would have failed at runtime with a key/value type mismatch.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        boolean result = job.waitForCompletion(true);
        return result ? 0 : 1;
    }

    /**
     * Splits each input line on whitespace and emits (word, 1) pairs, while a
     * global counter (group "lanyxp_counters") records how many times the
     * target word "hadoop" is seen.
     *
     * <p>FIX: declared {@code static} — Hadoop instantiates mapper classes
     * reflectively and cannot construct a non-static inner class, which needs
     * an enclosing {@code CounterDriver} instance.
     */
    static class WordCountMapper extends Mapper<LongWritable, Text, Text, LongWritable> {
        private final Text outKey = new Text();
        private final LongWritable outValue = new LongWritable(1L);

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Obtain a global counter from the task context; identified by a
            // counter group name and a counter name.
            Counter counter = context.getCounter("lanyxp_counters", "hadoop Counter");
            final String[] splitValues = value.toString().split("\\s+");
            for (String splitValue : splitValues) {
                if (incr.equals(splitValue)) {
                    counter.increment(1L);
                }
                // FIX: outKey was never assigned, so every record was emitted
                // with an empty key and all words collapsed into one reduce
                // group; set it to the current token before writing.
                outKey.set(splitValue);
                context.write(outKey, outValue);
            }
        }
    }

    /**
     * Sums the per-word 1-counts emitted by the mapper and writes the total
     * count for each word.
     *
     * <p>FIX: declared {@code static} for the same reflective-instantiation
     * reason as the mapper.
     */
    static class WordCountReducer extends Reducer<Text, LongWritable, Text, LongWritable> {
        private final LongWritable outValue = new LongWritable();

        @Override
        protected void reduce(Text key, Iterable<LongWritable> values, Context context)
                throws IOException, InterruptedException {
            // Primitive long avoids per-iteration autoboxing of the boxed Long
            // the original used.
            long count = 0L;
            for (LongWritable value : values) {
                count += value.get();
            }
            outValue.set(count);
            context.write(key, outValue);
        }
    }

}
