package com.caul.demo.hadoop.mapreduce.word;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * Reducer for the word-count job.
 *
 * KEYIN    - key type produced by the map phase (the word): String -> Text
 * VALUEIN  - value type produced by the map phase (a partial count): Integer -> IntWritable
 * <p>
 * KEYOUT   - key type of the final output (the word): String -> Text
 * VALUEOUT - value type of the final output (the total count): Integer -> IntWritable
 * Created by sdliang on 2018/3/30.
 */
public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

  // Reused output value: the framework serializes the value during context.write,
  // so a single mutable instance is safe and avoids allocating one IntWritable
  // per distinct key (standard Hadoop idiom, as in the official WordCount example).
  private final IntWritable result = new IntWritable();

  /**
   * Sums all partial counts emitted by the map phase for one word and writes
   * the (word, total) pair to the job output.
   *
   * @param key     the word; every value in {@code values} was emitted under this key
   * @param values  iterator over all partial counts for {@code key}
   * @param context framework handle used to emit the aggregated result
   * @throws IOException          if emitting the output record fails
   * @throws InterruptedException if the task is interrupted while writing
   */
  @Override
  protected void reduce(Text key, Iterable<IntWritable> values, Context context)
      throws IOException, InterruptedException {
    int sum = 0;
    for (IntWritable value : values) {
      sum += value.get();
    }
    result.set(sum);
    context.write(key, result);
  }
}
