import bean.User;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.api.java.function.ReduceFunction;
import org.apache.spark.sql.*;
import scala.Tuple2;

/**
 * @Author: wsl
 * @Date: 2023/05/04 14:08
 * @Description: Spark SQL demo — typed Dataset conversion, and the difference
 *               between the untyped groupBy and the typed groupByKey APIs.
 */
public class Demo {
    public static void main(String[] args) {

        // Must be set BEFORE the SparkSession is created: the Hadoop client
        // resolves the effective user during session/FS initialization, so
        // setting it afterwards (as the original did) can have no effect.
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        SparkConf conf = new SparkConf().setAppName("sparksql").setMaster("local[*]");
        SparkSession spark = SparkSession.builder().config(conf).getOrCreate();

        // Read the JSON file as an untyped DataFrame (Dataset<Row>).
        Dataset<Row> lineDS = spark.read().json("user.json");

        // Conversion 1: bind the rows to the User bean via an encoder.
        Dataset<User> userDS = lineDS.as(Encoders.bean(User.class));
        userDS.printSchema();
        userDS.show();

        // Conversion 2: map each Row to a User explicitly.
        // Fields are fetched by NAME, not by ordinal — the original getLong(0)/
        // getString(1) depended on Spark's alphabetical JSON column order and
        // would silently misread data if the schema gained a column.
        Dataset<User> userDataset = lineDS.map(new MapFunction<Row, User>() {
            @Override
            public User call(Row value) throws Exception {
                return new User(value.<Long>getAs("age"), value.<String>getAs("name"));
            }
        }, Encoders.bean(User.class));

        userDataset.sort(userDataset.col("age")).show();

        // Untyped API: groupBy returns a RelationalGroupedDataset whose
        // aggregates (count, avg, ...) yield Dataset<Row>.
        RelationalGroupedDataset groupByDS = userDataset.groupBy("name");
        Dataset<Row> count = groupByDS.count();
        // Display the aggregate — the original computed it but never showed it,
        // so half of the groupBy-vs-groupByKey comparison was invisible.
        count.show();

        // Typed (functional) API: groupByKey keys the Dataset by an extracted
        // value and keeps the User type — more flexible for custom reductions.
        KeyValueGroupedDataset<String, User> groupedDataset = userDataset.groupByKey(new MapFunction<User, String>() {
            @Override
            public String call(User value) throws Exception {
                return value.name;
            }
        }, Encoders.STRING());

        // Typed aggregations start from groupByKey; reduceGroups is the
        // recommended entry point.
        Dataset<Tuple2<String, User>> result = groupedDataset.reduceGroups(new ReduceFunction<User>() {
            @Override
            public User call(User v1, User v2) throws Exception {
                // Keep the larger age of the two users sharing a name.
                return new User(Math.max(v1.age, v2.age), v1.name);
            }
        });

        result.show();

        spark.stop();
    }
}
