package top.doe.spark_sql;

import org.apache.spark.SparkConf;
import org.apache.spark.sql.*;
import org.apache.spark.sql.api.java.UDF1;
import org.apache.spark.sql.types.DataTypes;

public class Hanshu {

    /**
     * Demonstrates Spark SQL user-defined functions: reads JSON input into a
     * temporary view, aggregates with a custom UDAF, and transforms a column
     * with a scalar UDF.
     *
     * @param args command-line arguments (unused)
     * @throws AnalysisException if a SQL statement fails to resolve against the view
     */
    public static void main(String[] args) throws AnalysisException {

        // Build a local SparkSession for this demo job.
        SparkSession session =
                SparkSession.builder()
                        .appName("demo1")
                        .master("local")
                        .getOrCreate();

        try {
            // Map the JSON source files into a Dataset and expose it as SQL view "ds".
            Dataset<Row> ds = session.read().json("spark_data/excersize_3/input");
            ds.createTempView("ds");
            ds.printSchema();
            session.sql("select * from ds").show();

            // Register the custom average aggregator under the name "my_avg".
            // NOTE(review): MyAvg2 is defined elsewhere in the project — presumably
            // an Aggregator over Double inputs (Encoders.DOUBLE()); confirm there.
            session.udf().register("my_avg", functions.udaf(new MyAvg2(), Encoders.DOUBLE()));

            // Aggregate per uid and write the result as uncompressed Parquet.
            session.sql("select uid,my_avg(amt) as amt from ds group by uid")
                    .write()
                    .option("parquet.compression", "NONE")
                    .parquet("sql_data/parout");

            // Register a scalar UDF that upper-cases a string column, then apply it.
            session.udf().register("up", (UDF1<String, String>) String::toUpperCase, DataTypes.StringType);
            session.sql("select up(oid) from ds").show();
        } finally {
            // Release Spark resources even if a query fails; the original leaked
            // the session by never calling stop().
            session.stop();
        }
    }
}
