package p5p4Plus;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class p5PlusReducer extends Reducer<Text, IntWritable, Text, LongWritable> {
    // Output objects are instantiated once per Reducer instance (not once per
    // reduce() call) to avoid needless object churn.
    private LongWritable valueOut = new LongWritable();
    // Running maximum of the per-key sums seen so far by this reduce task.
    private long max = Long.MIN_VALUE;
    private Text keyout = new Text();

    /**
     * Sums all counts for one key and updates the running maximum.
     *
     * Note: reduce() is called once per key group; nothing is written here.
     * The single (key, max) pair is emitted exactly once in cleanup().
     * NOTE(review): this finds a global maximum only if the job runs with a
     * single reduce task — with multiple reducers each task emits its own
     * local maximum; confirm the job configuration.
     *
     * @param key     the grouping key (a Text instance REUSED by the framework)
     * @param values  all counts emitted for this key
     * @param context used only in cleanup(); unused here
     */
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        int sum = 0;
        for (IntWritable value : values) {
            sum += value.get();
        }
        if (sum > max) {
            max = sum;
            // BUG FIX: Hadoop reuses the same 'key' object across reduce() calls,
            // mutating its contents each time. Storing the reference
            // (keyout = key) therefore left keyout holding whatever the LAST
            // processed key was, not the key of the maximum. Copy the bytes
            // into our own Text instead.
            keyout.set(key);
            valueOut.set(max);
        }
    }

    /**
     * Runs once at the end of the task: emits the key with the largest sum.
     */
    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
        context.write(keyout, valueOut);
    }
}