package MR_process.MR1;

import java.io.IOException;

import org.apache.hadoop.mapreduce.Reducer;

import MR_process.Bean1;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;

public class Reduce1 extends Reducer<Text, LongWritable, Bean1, NullWritable> {

    /**
     * Sums the per-key counts emitted by the map phase and writes one
     * {@link Bean1} record per (doc_id, word) pair.
     *
     * @param key     mapper output key; assumed format "&lt;doc_id&gt; &lt;word&gt;"
     *                (space-separated) — TODO confirm against the Map1 emitter
     * @param values  partial counts for this key
     * @param context used to emit the aggregated {@code Bean1} result
     * @throws IOException          if the underlying write fails
     * @throws InterruptedException if the task is interrupted
     */
    @Override
    public void reduce(Text key, Iterable<LongWritable> values, Context context)
            throws IOException, InterruptedException
    {
        long sum = 0;
        // Sum the actual emitted values instead of counting iterations
        // (the original did sum++). This is identical when the mapper emits
        // 1 per occurrence, but stays correct if a combiner pre-aggregates
        // partial counts before the reduce phase.
        for (LongWritable count : values) {
            sum += count.get();
        }
        // Split the composite key back into its two components.
        // NOTE(review): throws ArrayIndexOutOfBoundsException if the key has
        // no space — relies on the mapper always emitting "doc_id word".
        String[] parts = key.toString().split(" ");
        String docId = parts[0];
        String word = parts[1];
        context.write(new Bean1(word, docId, sum), NullWritable.get());
    }
}
