package com.udf.hive.udf.extract;

import java.util.logging.Logger;

import org.apache.hadoop.hive.ql.exec.UDF;
import org.apache.hadoop.io.Text;

/**
 * Data-extraction UDF: extracts a substring by character position.
 * 
 * @author tao zhou
 * @version [version, 2020-07-18]
 * @see [related classes/methods]
 * @since [product/module version]
 */
public class IndexExtractUDF extends UDF
{

    private static final Logger logger = Logger.getLogger("IndexExtractUDF");

    /**
     * Extracts the characters of {@code inputField} between the given positions.
     * 
     * @param inputField
     *            input field; a {@code null} input yields a {@code null} result
     * @param startIndex
     *            start position (inclusive), as decimal text
     * @param endIndex
     *            end position (exclusive), as decimal text
     * @return the extracted substring, or the trimmed original input when the
     *         indices are malformed, missing, or out of range
     */
    public Text evaluate(Text inputField, Text startIndex, Text endIndex) {
        if (inputField == null) {
            return null;
        }
        String inputFieldStr = String.valueOf(inputField).trim();
        String result;
        // Fix: parse the indices INSIDE the try block. Previously a null, empty
        // or non-numeric index Text threw an uncaught NumberFormatException
        // (String.valueOf(null) is the unparseable string "null"), failing the
        // whole query instead of falling back to the original value.
        try {
            int startIndexInt = Integer.parseInt(String.valueOf(startIndex).trim());
            int endIndexInt = Integer.parseInt(String.valueOf(endIndex).trim());
            result = indexExtract(inputFieldStr, startIndexInt, endIndexInt);
        }
        catch (Exception e) {
            // Log through the JUL logger (visible in task logs) rather than
            // printStackTrace; best-effort fallback keeps the input unchanged.
            logger.warning("index extract failed, returning input unchanged: " + e);
            result = inputFieldStr;
        }

        return new Text(result);
    }

    /**
     * Returns {@code inputFieldStr.substring(startIndexInt, endIndexInt)}.
     * 
     * @param inputFieldStr source string (must be non-null)
     * @param startIndexInt start index, inclusive
     * @param endIndexInt end index, exclusive
     * @return the extracted substring
     * @throws StringIndexOutOfBoundsException if the range is invalid for the input
     */
    public String indexExtract(String inputFieldStr, Integer startIndexInt, Integer endIndexInt) {
        return inputFieldStr.substring(startIndexInt, endIndexInt);
    }

    /** Manual smoke test: expects the characters at positions [3, 6). */
    public static void main(String[] args) {
        logger.info(String.valueOf(new IndexExtractUDF().evaluate(new Text("江苏省苏州市"), new Text("3"), new Text("6"))));
    }
}
