package com.niit.sequenceFile.write;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;
import java.util.Iterator;

/**
 * Date: 2024/10/15
 * Author: Ys
 * Description: Reducer that forwards every incoming Text value unchanged to
 * the job output, paired with a NullWritable (empty) key — an identity
 * pass-through used when writing records out as a SequenceFile.
 */
public class WriteSeqReducer extends Reducer<NullWritable, Text, NullWritable, Text> {

    // Empty output key. NullWritable.get() returns an immutable singleton,
    // so the field can be final and assigned at declaration — no setup()
    // initialization or cleanup() nulling is required.
    private final NullWritable outputKey = NullWritable.get();

    /**
     * Writes every value of the group to the output with a null key.
     * Because the mapper emits a single NullWritable key, all records arrive
     * here as one group and are passed through unchanged.
     *
     * @param key     the (null) grouping key emitted by the mapper
     * @param values  all Text records for this key, streamed by the framework
     * @param context output sink; each write goes directly to the job output file
     * @throws IOException          if the underlying output write fails
     * @throws InterruptedException if the task is interrupted by the framework
     */
    @Override
    protected void reduce(NullWritable key, Iterable<Text> values,
                          Reducer<NullWritable, Text, NullWritable, Text>.Context context)
            throws IOException, InterruptedException {
        // Enhanced for-loop over the Iterable replaces the manual
        // Iterator/hasNext/next handling — same iteration order and writes.
        for (Text value : values) {
            context.write(outputKey, value);
        }
    }
}
