package com.bigdata.spark.sql

import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.util.AccumulatorV2


object SparkSQL_Avg_Age_2_Acc {

    def main(args: Array[String]): Unit = {

        // TODO compute the average age of users via a custom accumulator
        val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("sparkSQL")
        val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()
        import spark.implicits._

        // Load the JSON dataset and drop down to the underlying Row RDD.
        val userDF: DataFrame = spark.read.json("datas/user.json")
        val userRows: RDD[Row] = userDF.rdd

        // Register the accumulator so executor-side adds are merged on the driver.
        val ageAcc = new MyAcc()
        spark.sparkContext.register(ageAcc, "ageAcc")

        // NOTE(review): column index 0 is assumed to hold the age value
        // (Spark infers JSON schemas with fields in alphabetical order) —
        // confirm against datas/user.json.
        userRows.foreach(row => ageAcc.add(row.getLong(0)))

        // Accumulator values must be read on the driver, after the action ran.
        println(ageAcc.value)

        spark.stop()
    }


    /**
     * Accumulator that computes the (truncated) integer average of the Long
     * values added to it: OUT = sum / count, or 0 when nothing was added.
     *
     * Fix: the original `value` divided by `cnt` unconditionally, throwing
     * ArithmeticException (/ by zero) for an empty accumulator (e.g. empty
     * input, or `value` read before any `add`).
     */
    class MyAcc extends AccumulatorV2[Long, Long] {

        var age = 0L // running sum of all values added
        var cnt = 0  // number of values added

        /** True when nothing has been accumulated yet. */
        override def isZero: Boolean = {
            age == 0 && cnt == 0
        }

        /** Returns an independent copy carrying the current sum and count. */
        override def copy(): AccumulatorV2[Long, Long] = {
            val newMyAc = new MyAcc
            newMyAc.age = this.age
            newMyAc.cnt = this.cnt
            newMyAc
        }

        /** Resets this accumulator back to its zero state. */
        override def reset(): Unit = {
            age = 0L
            cnt = 0
        }

        /** Adds one value: accumulate the sum and bump the count. */
        override def add(v: Long): Unit = {
            age += v
            cnt += 1
        }

        /** Merges the partial sum/count produced by another partition's copy. */
        override def merge(other: AccumulatorV2[Long, Long]): Unit = {
            other match {
                case o: MyAcc =>
                    age += o.age
                    cnt += o.cnt
                case _ => // ignore incompatible accumulator types, as before
            }
        }

        /**
         * Current average (Long division, truncated toward zero).
         * Returns 0 for an empty accumulator instead of dividing by zero.
         */
        override def value: Long = {
            if (cnt == 0) 0L else age / cnt
        }
    }
}
