package com.zhl.hive.custom;

import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;

import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

/**
 * Hive UDTF that splits a delimited string column into one output row per token
 * (a custom variant of the built-in {@code explode(split(...))}).
 *
 * <p>Deployment and registration:
 * <pre>
 * add jar /home/zhl/hadoop/modules/hive-3.1.2/lib/hive-1.0-SNAPSHOT.jar;
 * create temporary function my_explode as "com.zhl.hive.custom.SplitUdtf";
 * </pre>
 *
 * @author 刘振华 (Liu Zhenhua)
 * @since 2020-11-25
 */
public class SplitUdtf extends GenericUDTF {
	/** Delimiter used when the caller supplies only the input string. */
	private static final String DEFAULT_DELIMITER = ",";
	/** Reusable single-column output row; forward() consumes it before the next write. */
	private final Object[] forwardObj = new Object[1];

	/**
	 * Declares the output schema: one string column named "word"
	 * (the default alias, which the query can override with AS).
	 *
	 * @param argOIs inspector describing the call arguments; either 1 argument
	 *               (the string to split) or 2 (string, custom delimiter)
	 * @return struct object inspector for the single-column output row
	 * @throws UDFArgumentException if the argument count is not 1 or 2
	 */
	@Override
	public StructObjectInspector initialize(StructObjectInspector argOIs) throws UDFArgumentException {
		// Fail fast with a clear message instead of an opaque runtime error later.
		int argCount = argOIs.getAllStructFieldRefs().size();
		if (argCount < 1 || argCount > 2) {
			throw new UDFArgumentException(
					"SplitUdtf expects 1 or 2 arguments (input string[, delimiter]), got " + argCount);
		}
		List<String> fieldNames = new ArrayList<>(1);
		fieldNames.add("word");
		List<ObjectInspector> fieldOIs = new ArrayList<>(1);
		fieldOIs.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector);
		return ObjectInspectorFactory.getStandardStructObjectInspector(fieldNames, fieldOIs);
	}

	/**
	 * Splits the first argument on the delimiter (second argument if present,
	 * "," otherwise) and forwards one output row per token.
	 *
	 * <p>A NULL input produces no output rows — matching the built-in
	 * explode() — instead of throwing a NullPointerException.
	 *
	 * @param objects the call arguments for the current input row
	 * @throws HiveException if forwarding a row fails
	 */
	@Override
	public void process(Object[] objects) throws HiveException {
		if (objects == null || objects.length == 0 || objects[0] == null) {
			return; // NULL input row: emit nothing rather than NPE on toString()
		}
		String line = objects[0].toString();
		String delimiter = (objects.length > 1 && objects[1] != null)
				? objects[1].toString()
				: DEFAULT_DELIMITER;
		// Pattern.quote: treat the delimiter as a literal, not a regex,
		// so separators like "|" or "." split as the caller expects.
		for (String token : line.split(Pattern.quote(delimiter))) {
			forwardObj[0] = token;
			forward(forwardObj);
		}
	}

	/** No per-task resources (files, connections) to release. */
	@Override
	public void close() throws HiveException {
	}
}
