package com.mr.reducejoin;

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.HashMap;

import org.apache.commons.beanutils.BeanUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Reducer;

/*
 *       orderId pId pName  amount  source
 * (null,1001	01	noData	1	order.txt)
 * (null,noData	01	小米	noData	pd.txt)
 *
 * Before writing output, records must be classified by their "source"
 * attribute; with a NullWritable key this can only be done inside reduce().
 */

public class ReduceJoinReducer extends Reducer<NullWritable, JoinBean, NullWritable, JoinBean>{

	// Order records (source == "order.txt") buffered across reduce() calls;
	// they are joined against pdDatas and emitted in cleanup().
	private final List<JoinBean> orderDatas = new ArrayList<>();
	// Lookup table built from the product file (pd.txt): pId -> pName.
	private final Map<String, String> pdDatas = new HashMap<>();

	/**
	 * Classifies each incoming bean by its {@code source} attribute:
	 * order records are deep-copied and buffered, product records populate
	 * the pId -> pName lookup map. No output is written here; the actual
	 * join happens in {@link #cleanup}.
	 *
	 * @param key    always NullWritable (all records share one key, so every
	 *               value arrives in this single reduce call)
	 * @param values beans from both input files, distinguished by source
	 * @throws IOException if copying bean properties fails — rethrown instead
	 *                     of being swallowed, so a bad record fails the task
	 *                     rather than silently emitting an empty bean
	 */
	@Override
	protected void reduce(NullWritable key, Iterable<JoinBean> values,
						  Reducer<NullWritable, JoinBean, NullWritable, JoinBean>.Context context)
			throws IOException, InterruptedException {

		for (JoinBean value : values) {

			if(value.getSource().equals("order.txt")) {

				// Hadoop reuses the same object instance on every iteration,
				// only overwriting its fields — so we must copy the current
				// state into a fresh bean before buffering it.
				JoinBean jb = new JoinBean();

				try {
					// Apache Commons BeanUtils utility for copying matching
					// properties between two beans (note: NOT part of Hadoop).
					BeanUtils.copyProperties(jb, value);
				} catch (IllegalAccessException | InvocationTargetException e) {
					// Propagate as a task failure instead of adding a
					// half-initialized bean to the join output.
					throw new IOException("Failed to copy JoinBean properties", e);
				}

				orderDatas.add(jb);

			}else {

				pdDatas.put(value.getpId(), value.getpName());

			}

		}

	}

	/**
	 * Performs the actual join after all records have been classified:
	 * for each buffered order record, resolves its product name from the
	 * lookup map and writes the enriched bean out.
	 *
	 * @throws IOException          on write failure
	 * @throws InterruptedException if the task is interrupted during write
	 */
	@Override
	protected void cleanup(Reducer<NullWritable, JoinBean, NullWritable, JoinBean>.Context context)
			throws IOException, InterruptedException {

		// Only order records are emitted; product records exist solely
		// to enrich them with pName.
		for (JoinBean joinBean : orderDatas) {

			// NOTE(review): a pId missing from pd.txt yields a null pName
			// here — confirm JoinBean.write()/toString() tolerates null,
			// or consider a placeholder such as "noData".
			joinBean.setpName(pdDatas.get(joinBean.getpId()));

			context.write(NullWritable.get(), joinBean);

		}

	}
}
