package com.itheima.udtf;


import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

import java.util.ArrayList;
import java.util.List;
import java.util.function.ObjDoubleConsumer;


import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.metadata.HiveException;

/**
 * Hive UDTF that splits a string into multiple rows.
 *
 * <p>Called as {@code my_udtf(str, separator)}: the first argument is the string
 * to split, the second is the separator. Each resulting token is emitted as one
 * output row with a single string column named {@code column_01}.
 */
public class MyUDTF extends GenericUDTF {

    // Reusable one-element row buffer so process() does not allocate per row.
    private final transient Object[] forwardListObj = new Object[1];

    /**
     * Declares the output schema: one string column named "column_01".
     *
     * @param argOIs inspectors for the arguments the query passes to this UDTF
     * @return a struct inspector describing the single string output column
     * @throws UDFArgumentException if the function is not called with exactly two arguments
     */
    @Override
    public StructObjectInspector initialize(StructObjectInspector argOIs) throws UDFArgumentException {
        // Fail fast at query-compile time on wrong arity; otherwise process()
        // would later throw an opaque ArrayIndexOutOfBoundsException.
        if (argOIs.getAllStructFieldRefs().size() != 2) {
            throw new UDFArgumentException(
                    "MyUDTF expects exactly 2 arguments: (string, separator)");
        }

        // Output column names.
        List<String> fieldNames = new ArrayList<>();
        fieldNames.add("column_01");

        // Output column value types (one string column).
        List<ObjectInspector> fieldOIs = new ArrayList<>();
        fieldOIs.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector);

        return ObjectInspectorFactory.getStandardStructObjectInspector(fieldNames, fieldOIs);
    }

    /**
     * Splits the first argument by the second and forwards one row per token.
     * SQL NULL inputs produce no output rows instead of throwing NPE.
     *
     * @param objects the two arguments: [0] = string to split, [1] = separator
     * @throws HiveException if forwarding a row fails
     */
    @Override
    public void process(Object[] objects) throws HiveException {
        // Guard against NULL arguments (common for SQL data) and short arrays.
        if (objects == null || objects.length < 2
                || objects[0] == null || objects[1] == null) {
            return;
        }

        String str = objects[0].toString();
        String separator = objects[1].toString();

        // NOTE: String.split treats the separator as a regular expression,
        // matching the behavior of Hive's built-in split() UDF.
        for (String word : str.split(separator)) {
            forwardListObj[0] = word;
            forward(forwardListObj);
        }
    }

    /** No buffered state to flush. */
    @Override
    public void close() throws HiveException {
    }
}
