package com.zzl.spark.sql.UDF_UDAF;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SQLContext;
import org.apache.spark.sql.api.java.UDF1;
import org.apache.spark.sql.api.java.UDF2;
import org.apache.spark.sql.types.DataType;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;

import java.util.ArrayList;
import java.util.List;


/**
 * UDF vs. UDAF demo: registers a UDF ("strLen") that computes the length of each name
 * and invokes it through a Spark SQL query.
 *
 * UDF  (user-defined function): one input row in, one output value out.
 * UDAF (user-defined aggregate function): many input rows in, one aggregated value out.
 */
public class UDFJava {

    public static void main(String[] args) {
        // Must be set BEFORE the SparkContext is created so Hadoop's static
        // initialization (winutils lookup on Windows) picks it up.
        System.setProperty("hadoop.home.dir", "E:\\hadoop");

        SparkConf conf = new SparkConf().setMaster("local").setAppName("DataFrameOps");
        JavaSparkContext sc = new JavaSparkContext(conf);
        sc.setLogLevel("WARN");

        SQLContext sqlContext = new SQLContext(sc);

        // Sample input: a single column of names. One entry deliberately
        // contains inner spaces to show the UDF counts every character.
        List<String> list = new ArrayList<>();
        list.add("yarn");
        list.add("marry");
        list.add("jack");
        list.add("to    m");
        list.add("tom");

        JavaRDD<String> parallelize = sc.parallelize(list);

        // Wrap each String into a one-column Row.
        JavaRDD<Row> map = parallelize.map(RowFactory::create);

        // Schema: one nullable string column named "name".
        List<StructField> structFields = new ArrayList<>();
        structFields.add(DataTypes.createStructField("name", DataTypes.StringType, true));

        StructType structType = DataTypes.createStructType(structFields);

        Dataset<Row> dataFrame = sqlContext.createDataFrame(map, structType);

        // registerTempTable is deprecated since Spark 2.0; use the
        // Dataset-level replacement instead.
        dataFrame.createOrReplaceTempView("nameTable");

        // Null-safe UDF: the "name" column is declared nullable above, so a
        // null row must not NPE — propagate null instead.
        sqlContext.udf().register(
                "strLen",
                (UDF1<String, Integer>) s -> s == null ? null : s.length(),
                DataTypes.IntegerType);

        sqlContext.sql("select name,strLen(name) as age from nameTable").show();
        sc.stop();
    }
}
