package com.study.mr.partitioner;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class FlowReducer extends Reducer<Text, FlowBean, Text, FlowBean> {

    // Reused output-value object: Hadoop invokes reduce() once per key group,
    // so a single FlowBean instance avoids allocating a new bean per key.
    // The incoming key is already a Text and is emitted unchanged, so no
    // separate output-key field is needed.
    private final FlowBean outv = new FlowBean();

    /**
     * Aggregates all flow records for one key: sums the upstream and
     * downstream flow across the key's values, derives the combined total via
     * the no-arg {@code FlowBean#setSumFlow()}, and emits one record per key.
     *
     * @param key     the grouping key, written back out unchanged
     * @param values  all {@code FlowBean} records that share this key
     * @param context Hadoop context used to emit the aggregated result
     * @throws IOException          if writing the output record fails
     * @throws InterruptedException if the task is interrupted while writing
     */
    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context)
            throws IOException, InterruptedException {
        // 1. Accumulate up/down flow over every record in this key group.
        //    NOTE(review): int accumulators assume per-key totals fit in
        //    32 bits — confirm against the expected data volume.
        int totalUpFlow = 0;
        int totalDownFlow = 0;
        for (FlowBean value : values) {
            totalUpFlow += value.getUpFlow();
            totalDownFlow += value.getDownFlow();
        }

        // 2. Populate the reused output bean; the no-arg setSumFlow()
        //    derives the total from the up/down fields just assigned.
        outv.setUpFlow(totalUpFlow);
        outv.setDownFlow(totalDownFlow);
        outv.setSumFlow();

        // 3. Emit the aggregated record for this key.
        context.write(key, outv);
    }
}
