package MR_process.MR2;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;

import MR_process.Bean1;
import MR_process.Bean2;

public class Reduce2 extends Reducer<Text,Bean1,Bean2,NullWritable> {
    /** Cap on how many values are buffered for a single key; see the note on {@link #reduce}. */
    private final long countBase = 100000;

    /**
     * Buffers every value arriving for {@code key}, counting them, then re-emits each
     * buffered record as a {@link Bean2} that additionally carries the total count.
     *
     * NOTE(review): when a key has {@code countBase} (100000) or more values, the loop
     * returns early and the key produces NO output at all — confirm that dropping the
     * whole key (rather than emitting the first {@code countBase} records via a
     * {@code break}) is the intended behavior.
     *
     * @param key     the grouping key for this reduce call
     * @param values  all mapper outputs grouped under {@code key}
     * @param context Hadoop context used to emit (Bean2, NullWritable) pairs
     * @throws IOException          propagated from {@code context.write}
     * @throws InterruptedException propagated from {@code context.write}
     */
    @Override
    public void reduce(Text key, Iterable<Bean1> values, Context context) throws IOException, InterruptedException {
        List<Bean1> buffered = new ArrayList<>();
        long totalSeen = 0;

        for (Bean1 value : values) {
            // Copy each value — presumably because Hadoop reuses the value object
            // across iterations, so storing the reference directly would be unsafe.
            buffered.add(new Bean1(value.getWord(), value.getDoc_id(), value.getCount()));
            totalSeen++;
            if (totalSeen >= countBase) {
                return; // key is too frequent: discard it entirely (emit nothing)
            }
        }

        // Stamp every buffered record with the total count for this key and emit it.
        for (Bean1 value : buffered) {
            context.write(
                new Bean2(value.getWord(), value.getDoc_id(), value.getCount(), totalSeen),
                NullWritable.get());
        }
    }

}
