package com.smzdm.functions.udtf;

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hive.ql.exec.Description;
import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.udf.UDFType;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
/**
 * http://www.mamicode.com/info-detail-640789.html【hiveudtf开发demo，一行输出多行，比如行转列】
 * http://www.cnblogs.com/ggjucheng/archive/2013/02/01/2888819.html【hive udtf sql中用法】
 * http://blog.csdn.net/wisgood/article/details/26166897【hive udtf简单示例】
 * 
 * add jar /home/isuhadoop/tests/udf/column2row.jar;
 * CREATE TEMPORARY FUNCTION col2row AS 'com.analysys.column2row.utils.Column2Row';
 * Hive中有3种UDF：
 * 	UDF： 操作单个数据行，产生单个数据行；
 * 	UDAF： 操作多个数据行，产生一个数据行。
 * 	UDTF： 操作一个数据行，产生多个数据行一个表作为输出。
 * select col2row(t0,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12) as (col1,col2) from  bds_dev_user_bhv_sum_d limit 100;
 * 
 * select userbhv.device_id, mytable.col1, mytable.col2 from bds.bds_dev_user_bhv_sum_d userbhv lateral view col2row(t0,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12,t13,t14,t15,t16,t17,t18,t19,t20,t21,t22,t23) mytable as col1, col2 where day='20160720' limit 50;
 * 
 * @author root
 * 20160721
 *
 */
@Description(name = "col2row", value = "_FUNC_(colValue0, colValue1, colValue2 …) - one row data convert to multiple row data, " 
+ "last output coordinate row data.", extended = "1、can deirectly use :select col2row(properties) as (col1,col2) from src; " 
+ "2、can not add other field:select a, col2row(properties) as (col1,col2) from src, "
+ "3、can not nested use :select col2row(col2row(properties)) from src, "
+ "4、can not with group by/cluster by/distribute by/sort by: select col2row(properties) as (col1,col2) from src group by col1, col2, " 
+ "."
+ "example: select col2row('10','12','13','15','100') as (col1,col2) from tmp.dual;") 
@UDFType(deterministic = false) 
public class UDTFColumn2Row extends GenericUDTF{
	
	// 返回UDTF的处理行的信息（个数，类型）
	public StructObjectInspector initialize(ObjectInspector[] args)
            throws UDFArgumentException {
        if (args.length <=0) {
            throw new UDFArgumentLengthException("Column2Row takes more than one argument");
        }
        if (args[0].getCategory() != ObjectInspector.Category.PRIMITIVE) {
            throw new UDFArgumentException("Column2Row takes string as a parameter");
        }
        //一行输出多列
        //返回的列名list
        ArrayList<String> fieldNames = new ArrayList<String>();
        //返回列值数据类型list
        ArrayList<ObjectInspector> fieldOIs = new ArrayList<ObjectInspector>();
        fieldNames.add("col1");
        fieldOIs.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector);
        fieldNames.add("col2");
        fieldOIs.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector);

        return ObjectInspectorFactory.getStandardStructObjectInspector(fieldNames,fieldOIs);
    }
	@Override
	public void process(Object[] args) throws HiveException {
		String [] retValue= new String[2];
		int i=0;
		//一行输出多列
		for(Object obj:args){
//			try {
				retValue[0]="t"+i;
				retValue[1]=obj==null?"":obj.toString();
				forward(retValue);
				i++;
//			}catch (Exception e) {
//                continue;
//            }
		}
	}

	@Override
	public void close() throws HiveException {
	}
}

