package com.shujia.optimize

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

/**
  * Demo: per-class aggregations (max age, average age) over a student file,
  * implemented twice each — once with `groupByKey` and once with the more
  * efficient `reduceByKey` (which pre-aggregates on the map side).
  *
  * Input file format (CSV): id,name,age,gender,class
  */
object DemoMaxAge {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("maxage")
      .getOrCreate()

    val sc: SparkContext = spark.sparkContext
    val studentsRDD: RDD[String] = sc.textFile("Spark/data/stu/students.txt")

    /**
      * Parse each line into a (class, age) pair.
      *
      * `collect` with a partial function (instead of `map`) silently skips
      * malformed lines that do not have exactly 5 fields, where a `map`
      * over the same pattern would throw a MatchError.
      */
    val kvRDD: RDD[(String, Int)] = studentsRDD
      .map(_.split(","))
      .collect {
        case Array(_, _, age, _, clazz) =>
          (clazz, age.toInt)
      }

    /**
      * Max age per class, version 1: groupByKey.
      * Shuffles every (class, age) pair across the network before reducing.
      */
    val maxAgeRDD1: RDD[(String, Int)] = kvRDD
      .groupByKey()
      .map {
        // Note: no type ascriptions in the pattern — `Iterable[Int]` would
        // be an unchecked, erasure-blind match anyway.
        case (clazz, ages) =>
          (clazz, ages.max)
      }

    maxAgeRDD1.foreach(println)

    /**
      * Max age per class, version 2: reduceByKey.
      * Combines values map-side first, so far less data is shuffled.
      */
    val maxAgeRDD2: RDD[(String, Int)] = kvRDD.reduceByKey(math.max)

    maxAgeRDD2.foreach(println)

    /**
      * Average age per class, version 1: groupByKey.
      */
    val avgAgeRDD1: RDD[(String, Double)] = kvRDD.groupByKey()
      .map {
        case (clazz, ages) =>
          val avgAge: Double = ages.sum.toDouble / ages.size
          (clazz, avgAge)
      }

    avgAgeRDD1.foreach(println)

    /**
      * Average age per class, version 2: reduceByKey.
      * Average is not associative, so carry (sum, count) pairs through the
      * reduction and divide only at the end.
      */
    val kvvRDD: RDD[(String, (Int, Int))] = kvRDD.map {
      case (clazz, age) =>
        (clazz, (age, 1))
    }

    // Sum ages and counts per class in a single associative reduction.
    val sumRDD: RDD[(String, (Int, Int))] = kvvRDD.reduceByKey {
      case ((age1, cnt1), (age2, cnt2)) =>
        (age1 + age2, cnt1 + cnt2)
    }

    val avgAgeRDD2: RDD[(String, Double)] = sumRDD.map {
      case (clazz, (sumAge, num)) =>
        (clazz, sumAge.toDouble / num)
    }

    avgAgeRDD2.foreach(println)

    // Keep the application (and the Spark web UI at localhost:4040) alive
    // for inspection. Thread.sleep parks the main thread instead of the
    // original empty `while (true)` loop, which busy-spun at 100% CPU.
    Thread.sleep(Long.MaxValue)
  }

}
