package com.atguigu.bigdata.spark.sql;

import org.apache.spark.SparkConf;
import org.apache.spark.sql.*;
import org.apache.spark.sql.expressions.Aggregator;

/*
        // In early Spark versions, strongly typed UDAF operations could not be used in SQL
        // SQL & DSL
        // Early strongly typed UDAF aggregate functions were invoked through the DSL syntax
 */
public class Spark03_SparkSQL_UDAF2_JAVA {
    public static void main(String[] args) {
        // Run Spark locally, using all available cores.
        SparkConf sparkConf = new SparkConf().setMaster("local[*]").setAppName("sparkSQL");
        SparkSession sparkSession = SparkSession.builder().config(sparkConf).getOrCreate();

        // Read the JSON file and turn the untyped DataFrame into a typed Dataset<User>.
        Dataset<User> userDataset = sparkSession.read()
                .json("datas/user.json")
                .as(Encoders.bean(User.class));

        // Convert the strongly typed UDAF into a column object usable in a DSL select.
        TypedColumn<User, Long> avgAgeColumn = new MyAvgUDAF2().toColumn();
        userDataset.select(avgAgeColumn).show();

        sparkSession.close();
    }
}
/**
 * Strongly typed UDAF that computes the average age over a {@code Dataset<User>}.
 *
 * <p>Aggregation state is carried in a {@link Buff} (running total + row count);
 * the final result is the truncated integer average as a {@link Long}.
 */
class MyAvgUDAF2 extends Aggregator<User, Buff, Long> {

    /** Initial (empty) buffer: total = 0, count = 0. */
    @Override
    public Buff zero() {
        return new Buff(0L, 0L);
    }

    /** Folds one input row into the aggregation buffer. */
    @Override
    public Buff reduce(Buff buffer, User user) {
        buffer.setTotal(buffer.getTotal() + user.getAge());
        buffer.setCount(buffer.getCount() + 1);
        return buffer;
    }

    /** Combines partial buffers produced on different partitions. */
    @Override
    public Buff merge(Buff b1, Buff b2) {
        b1.setTotal(b1.getTotal() + b2.getTotal());
        b1.setCount(b1.getCount() + b2.getCount());
        return b1;
    }

    /**
     * Produces the final average.
     *
     * @return the truncated integer average, or {@code 0L} for an empty input
     *         (Spark calls {@code finish(zero())} when no rows were aggregated,
     *         which previously threw {@link ArithmeticException} on 0 / 0).
     */
    @Override
    public Long finish(Buff reduction) {
        if (reduction.getCount() == 0L) {
            return 0L;
        }
        // Integer division: the average is truncated toward zero.
        return reduction.getTotal() / reduction.getCount();
    }

    /** Encoder for the intermediate buffer type (JavaBean-style Buff). */
    @Override
    public Encoder<Buff> bufferEncoder() {
        return Encoders.bean(Buff.class);
    }

    /** Encoder for the final Long result. */
    @Override
    public Encoder<Long> outputEncoder() {
        return Encoders.LONG();
    }
}
