package com.shujia.mr.wc3;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;
/*
    Reducer class: defines how each group of key-grouped records is processed.
    Before grouping, the reduce task has already fetched the map outputs with its
    partition number, stripped the partition number, merge-sorted them, and grouped them by key.

    Steps:
        1) Extend the Reducer class, declaring the input key/value types and the output
           key/value types. In most cases the input types match the key/value pair types
           produced by the map task.
            Note: the input types declared here are the key/value types the reduce task fetches.
        2) Override the reduce method.


    Input:
        <"hello",1>
        ...

        Before reaching the reduce method logic, records are grouped into: <"hello",[1,1,1,1,1,1,1]>

    Output:
        <"hello", 7>
 */


public class WordCountReducer3 extends Reducer<Text, LongWritable, Text, LongWritable> {

    @Override
    protected void setup(Reducer<Text, LongWritable, Text, LongWritable>.Context context) throws IOException, InterruptedException {
        // Runs exactly once before any reduce() calls in this task.
    }

    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Reducer<Text, LongWritable, Text, LongWritable>.Context context) throws IOException, InterruptedException {
        // Invoked once per key group, e.g. <"hello", [1,1,1,1,1,1,1]>:
        // accumulate all counts for this word into a single total.
        long total = 0L;
        for (LongWritable count : values) {
            total += count.get();
        }
        // Emit <word, total> via the context; the framework handles how the
        // output is actually written.
        context.write(key, new LongWritable(total));
    }

    @Override
    protected void cleanup(Reducer<Text, LongWritable, Text, LongWritable>.Context context) throws IOException, InterruptedException {
        // Runs exactly once after all reduce() calls in this task have finished.
    }
}
