package com.udf;

import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/*
 A UDTF (table-generating function, not a UDAF): takes two string arguments —
 the first is the target string, the second is a delimiter. It splits the first
 string by the delimiter and emits each resulting substring as one output row.
 */
public class MyUDTF extends GenericUDTF {

    /**
     * Single-column output row buffer, reused across {@code forward()} calls
     * to avoid allocating a new array per emitted row (standard UDTF pattern).
     */
    private final Object[] dataArr = new Object[1];

    /**
     * Declares the output schema: a single string column named {@code split_str}.
     *
     * @param argOIs inspectors for the call-site arguments; exactly two are expected
     *               (target string, delimiter)
     * @return a struct inspector describing the one-column output row
     * @throws UDFArgumentException if the function is not called with exactly two arguments
     */
    @Override
    public StructObjectInspector initialize(StructObjectInspector argOIs) throws UDFArgumentException {
        // Validate arity up front so misuse fails at query-compile time, not per-row.
        if (argOIs.getAllStructFieldRefs().size() != 2) {
            throw new UDFArgumentException("MyUDTF expects exactly 2 arguments: target string and delimiter");
        }

        // Output column name(s)
        List<String> fieldNames = new ArrayList<>();
        fieldNames.add("split_str");

        // Output column type(s)
        List<ObjectInspector> fieldTypes = new ArrayList<>();
        fieldTypes.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector);

        // getStandardStructObjectInspector(List<String> structFieldNames, List<ObjectInspector> structFieldObjectInspectors)
        return ObjectInspectorFactory.getStandardStructObjectInspector(fieldNames, fieldTypes);
    }

    /**
     * Splits the first argument by the second (interpreted as a regex, per
     * {@link String#split(String)}) and forwards one output row per substring.
     *
     * @param args the two call-site values: target string and delimiter
     * @throws HiveException if forwarding a row fails
     */
    @Override
    public void process(Object[] args) throws HiveException {
        // Skip rows with NULL inputs instead of throwing a NullPointerException.
        if (args[0] == null || args[1] == null) {
            return;
        }
        String targetStr = args[0].toString();
        String splitStr = args[1].toString();

        for (String str : targetStr.split(splitStr)) {
            dataArr[0] = str;
            // BUG FIX: forward() must receive the Object[] row matching the
            // declared struct schema, not the bare String value.
            forward(dataArr);
        }
    }

    /** No per-task resources to release. */
    @Override
    public void close() throws HiveException {
        // Intentionally empty: this UDTF holds no state requiring cleanup.
    }
}
