package com.hadoop.mapreduce.Partitioner;

import entity.UserEntity;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;
import java.math.BigDecimal;
import java.math.RoundingMode;

/*
 * Reducer that enriches each {@code UserEntity} with its yearly income and
 * emits the enriched entity as the output key.
 *
 * The four generic type parameters are:
 *   KeyIn    - Text:         grouping key produced by the mapper
 *   ValueIn  - UserEntity:   serialized user record
 *   KeyOut   - UserEntity:   enriched user record written to output
 *   ValueOut - NullWritable: no output value is needed
 *
 * reduce() is invoked once per distinct key; all records sharing that key
 * arrive in the {@code values} iterable, so everything for a key is written
 * in one pass without repeatedly opening/closing output files.
 */
public class UserReducer extends Reducer<Text, UserEntity, UserEntity, NullWritable> {

    @Override
    protected void reduce(Text key, Iterable<UserEntity> values, Context context)
            throws IOException, InterruptedException {
        for (UserEntity userEntity : values) {
            // Yearly income = monthly income * 12, rounded half-up to 2 decimals.
            // BigDecimal is locale-independent; the previous
            // String.format("%.2f", ...) + Double.parseDouble(...) round-trip
            // breaks in locales that use ',' as the decimal separator (the
            // formatted "123,45" makes parseDouble throw NumberFormatException).
            double yearIncome = BigDecimal.valueOf(userEntity.getMonthIncome() * 12)
                    .setScale(2, RoundingMode.HALF_UP)
                    .doubleValue();
            userEntity.setYearIncome(yearIncome);
            // Debug trace to the task log: shows which JVM thread / reducer
            // instance processed this record.
            System.out.println("*************Thread id = " + Thread.currentThread().getId() +
                    "this.add=" + this.hashCode() + ";UserEntity=" + userEntity.toString());
            context.write(userEntity, NullWritable.get());
        }
    }
}
