package com.example.mapreducr.wordcount;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * Created with IntelliJ IDEA.
 * ClassName: WordCountReduce
 * Package: com.example.mapreducr.wordcount
 * Description:
 * User: fzykd
 *
 * @Author: LQH
 * Date: 2023-07-15
 * Time: 20:43
 */

/**
 * Notes on writing a Reducer:
 * 1. A custom Reducer must extend the framework's {@code Reducer} base class.
 * 2. The Reducer's input key/value types must match the Mapper's output types.
 * 3. Business logic belongs in the reduce method; it is invoked once per distinct key.
 */
public class WordCountReduce extends Reducer<Text, IntWritable, Text, IntWritable> {

    // Reusable output value object — set once per key before emitting,
    // avoiding a fresh allocation on every reduce call.
    private IntWritable result = new IntWritable();

    /**
     * Sums the occurrence counts for a single word.
     * <p>
     * The framework groups the mapper output by key, so this method receives
     * one word together with all of its counts, e.g. {@code (abc, (1,1,1,1))},
     * and emits the word paired with the total.
     *
     * @param key     the word produced by the mapper
     * @param values  all counts recorded for this word
     * @param context used to emit the (word, total) pair
     * @throws IOException          if writing the output fails
     * @throws InterruptedException if the task is interrupted
     */
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        int total = 0;
        for (IntWritable count : values) {
            total += count.get();
        }

        // Emit (word, total); the key is passed through unchanged from the mapper.
        result.set(total);
        context.write(key, result);
    }
}
