package hive.udaf;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.ql.exec.Description;
import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.udf.generic.AbstractGenericUDAFResolver;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
import org.apache.hadoop.hive.serde2.objectinspector.*;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;

import java.util.ArrayList;
import java.util.List;

/**
 * Many-to-one aggregate: accepts zero or more rows of a single primitive
 * column and returns all collected values as one list (collect_list-style).
 *
 * A Hive UDAF consists of two parts:
 *  - {@code AbstractGenericUDAFResolver}: validates the call-site arguments and
 *    selects the evaluator;
 *  - {@code GenericUDAFEvaluator}: implements the aggregation logic:
 *      init()             - called when the evaluator is instantiated; declares
 *                           input/output object inspectors for the current mode.
 *      iterate()          - consumes raw input rows into the AggregationBuffer (map side).
 *      terminatePartial() - emits the partial aggregation after iterate() (combiner-like).
 *      merge()            - folds terminatePartial() results together (reduce side).
 *      terminate()        - emits the final result (reduce side).
 *  The AggregationBuffer holds intermediate (or final) state; a custom buffer
 *  lets the UDAF aggregate arbitrary data.
 */
@Description(
	name = "collect",
	value = "_FUNC_(col) - The parameter is a column name. The return value is a set of the column.",
	extended = "Example:\n  > SELECT _FUNC_(col) from src;"
)
public class GenericUDAFCollect extends AbstractGenericUDAFResolver {
	private static final Log LOG = LogFactory.getLog(GenericUDAFCollect.class.getName());

	public GenericUDAFCollect() {
	}

	/**
	 * Validates the arguments: exactly one primitive-typed column is accepted.
	 *
	 * @param parameters type information of the call-site arguments
	 * @return the evaluator implementing the aggregation
	 * @throws SemanticException if the argument count or category is invalid
	 */
	@Override
	public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters)
			throws SemanticException {

		if (parameters.length != 1) {
			throw new UDFArgumentTypeException(parameters.length - 1,
					"Exactly one argument is expected.");
		}

		if (parameters[0].getCategory() != ObjectInspector.Category.PRIMITIVE) {
			throw new UDFArgumentTypeException(0,
					"Only primitive type arguments are accepted but "
					+ parameters[0].getTypeName() + " was passed as parameter 1.");
		}

		return new GenericUDAFCollectEvaluator();
	}

	@SuppressWarnings("deprecation")
	public static class GenericUDAFCollectEvaluator extends GenericUDAFEvaluator {

		// Inspector for the raw input column (set in PARTIAL1/COMPLETE, and to the
		// list-element inspector in PARTIAL2/FINAL); used by putIntoList() to copy values.
		private PrimitiveObjectInspector inputOI;
		// Inspector for the incoming partial-result list (PARTIAL2/FINAL only).
		private StandardListObjectInspector internalMergeOI;
		// Inspector describing the list this evaluator returns.
		private StandardListObjectInspector loi;

		/**
		 * Declares input and output object inspectors for the given mode.
		 * The four modes of GenericUDAFEvaluator.Mode:
		 *  PARTIAL1 - map side: raw input to partial aggregation; calls iterate() and terminatePartial().
		 *  PARTIAL2 - combiner: partial to partial; calls merge() and terminatePartial().
		 *  FINAL    - reduce side: partial to full aggregation; calls merge() and terminate().
		 *  COMPLETE - map-only job: raw input to full aggregation; calls iterate() and terminate().
		 *
		 * @param m the evaluation mode
		 * @param parameters inspectors for this stage's input
		 * @return the inspector describing this stage's output (always a standard list)
		 * @throws HiveException on initialization failure
		 */
		@Override
		public ObjectInspector init(Mode m, ObjectInspector[] parameters)
				throws HiveException {
			super.init(m, parameters);

			if (m == Mode.PARTIAL1 || m == Mode.COMPLETE) {
				// Raw input: a single primitive column; output is a list of standard copies.
				inputOI = (PrimitiveObjectInspector) parameters[0];
				return ObjectInspectorFactory.getStandardListObjectInspector(
						ObjectInspectorUtils.getStandardObjectInspector(inputOI));
			} else {
				// PARTIAL2 / FINAL: input is the partial-result list produced by terminatePartial().
				internalMergeOI = (StandardListObjectInspector) parameters[0];
				inputOI = (PrimitiveObjectInspector) internalMergeOI.getListElementObjectInspector();
				loi = ObjectInspectorFactory.getStandardListObjectInspector(inputOI);
				return loi;
			}
		}

		/** Buffer holding the values collected so far. */
		static class ArrayAggregationBuffer implements AggregationBuffer {
			List<Object> container;
		}

		/**
		 * Creates a fresh, empty aggregation buffer.
		 *
		 * @return a new buffer with an empty container
		 * @throws HiveException never thrown here; required by the interface
		 */
		@Override
		public AggregationBuffer getNewAggregationBuffer() throws HiveException {
			ArrayAggregationBuffer ret = new ArrayAggregationBuffer();
			reset(ret);
			return ret;
		}

		/**
		 * Clears the buffer so it can be reused for a new group.
		 *
		 * @param agg the buffer to reset
		 * @throws HiveException never thrown here; required by the interface
		 */
		@Override
		public void reset(AggregationBuffer agg) throws HiveException {
			((ArrayAggregationBuffer) agg).container = new ArrayList<Object>();
		}

		/**
		 * Consumes one raw input row (map side). Null values are skipped.
		 *
		 * @param agg the aggregation buffer
		 * @param param the row's argument values; only param[0] is used
		 * @throws HiveException on processing failure
		 */
		@Override
		public void iterate(AggregationBuffer agg, Object[] param) throws HiveException {
			Object p = param[0];
			if (p != null) {
				putIntoList(p, (ArrayAggregationBuffer) agg);
			}
		}

		/**
		 * Folds a partial-result list into the buffer (combiner/reduce side).
		 * BUGFIX: the original cast getList()'s result to ArrayList, but the
		 * ListObjectInspector contract only guarantees a List (lazy/binary serdes
		 * return other implementations), which could throw ClassCastException.
		 * Also guards against a null partial / null list.
		 *
		 * @param agg the aggregation buffer
		 * @param partial a partial result produced by terminatePartial()
		 * @throws HiveException on processing failure
		 */
		@Override
		public void merge(AggregationBuffer agg, Object partial) throws HiveException {
			if (partial == null) {
				return;
			}
			ArrayAggregationBuffer myAgg = (ArrayAggregationBuffer) agg;
			List<?> partialResult = this.internalMergeOI.getList(partial);
			if (partialResult == null) {
				return;
			}
			for (Object obj : partialResult) {
				putIntoList(obj, myAgg);
			}
		}

		/**
		 * Emits the final collected list (reduce side).
		 *
		 * @param agg the aggregation buffer
		 * @return a defensive copy of the collected values
		 * @throws HiveException on processing failure
		 */
		@Override
		public Object terminate(AggregationBuffer agg) throws HiveException {
			ArrayAggregationBuffer myAgg = (ArrayAggregationBuffer) agg;
			return new ArrayList<Object>(myAgg.container);
		}

		/**
		 * Emits the partial collected list (map-side output for the shuffle).
		 *
		 * @param agg the aggregation buffer
		 * @return a defensive copy of the values collected so far
		 * @throws HiveException on processing failure
		 */
		@Override
		public Object terminatePartial(AggregationBuffer agg) throws HiveException {
			ArrayAggregationBuffer myAgg = (ArrayAggregationBuffer) agg;
			return new ArrayList<Object>(myAgg.container);
		}

		/** Copies the value to a standard Java object and appends it to the buffer. */
		public void putIntoList(Object param, ArrayAggregationBuffer myAgg) {
			Object pCopy = ObjectInspectorUtils.copyToStandardObject(param, this.inputOI);
			myAgg.container.add(pCopy);
		}
	}
}

