package io.a.sql;

import io.a.function.AvgAgeAggregator;
import io.a.utils.H;
import lombok.extern.slf4j.Slf4j;
import org.apache.spark.sql.*;
import org.apache.spark.sql.api.java.UDF1;
import org.apache.spark.sql.types.DataTypes;

import static io.a.utils.Const.*;

@Slf4j
public class TestJob {

    private static final String FORMAT_JDBC = "jdbc";

    /** Shared Spark session; created by {@link #init()}, torn down in {@link #finish()}. */
    private static SparkSession session;

    /** JDBC reader pre-configured with the connection settings from {@code H.DB}. */
    private static final DataFrameReader dataFrameReader = init();

    /**
     * Creates a local-master SparkSession and returns a {@link DataFrameReader}
     * bound to the JDBC source described by {@code H.DB}.
     *
     * @return a reusable JDBC-format reader; callers add a {@code query} option and {@code load()}
     */
    private static DataFrameReader init() {
        session = SparkSession
                .builder()
                .master("local[*]")
                .appName(TestJob.class.getName())
                .getOrCreate();

        // Add .option("queryTimeout", "300") here if long-running queries need a cap.
        return session.read().format(FORMAT_JDBC)
                .option(DRIVER, H.DB.getDriver()).option(URL, H.DB.getUrl())
                .option(USER, H.DB.getUser()).option(PASSWORD, H.DB.getPassword());
    }

    /** Stops the SparkSession if one was created. Safe to call more than once. */
    private static void finish() {
        if (session != null) {
            session.stop();
        }
    }

    public static void main(String[] args) {
        // try/finally guarantees the SparkSession is stopped even when the
        // load, UDF registration, or query throws (previously it leaked on failure).
        try {
            final Dataset<Row> userDsRow =
                    dataFrameReader.option("query", "SELECT * FROM user").load();

            // Expose the JDBC result as a temp view so it can be queried with Spark SQL.
            userDsRow.createOrReplaceTempView("u");

            // Custom scalar (map-style) UDF: prefixes each name string.
            session.udf().register("prefixName",
                    (UDF1<String, String>) s -> "Name : " + s, DataTypes.StringType);

            // Custom aggregate (reduce-style) UDAF: averages ages over LONG-encoded input.
            session.udf().register("avgAge",
                    functions.udaf(new AvgAgeAggregator(), Encoders.LONG()));

            // e.g. "SELECT prefixName(name) FROM u" exercises the scalar UDF instead.
            Dataset<Row> userRow = session.sql("SELECT avgAge(age) FROM u");

            userRow.show();
        } finally {
            finish();
        }
    }
}
