package com.shujia.spark

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Demo7GroupByKey {

  /**
    * Demo: compute the average age per class with groupByKey.
    *
    * Reads students.txt (CSV: ..., age at column 2, class at column 4),
    * groups ages by class and prints (class, averageAge) pairs.
    */
  def main(args: Array[String]): Unit = {

    val conf: SparkConf = new SparkConf().setMaster("local[4]").setAppName("map")
    val sc: SparkContext = new SparkContext(conf)

    val rdd1: RDD[String] = sc.textFile("spark/data/students.txt")

    // Extract (class, age) pairs.
    // Fix: split each line only once instead of twice per record.
    val rdd2: RDD[(String, Int)] = rdd1.map(line => {
      val cols: Array[String] = line.split(",")
      val clazz: String = cols(4)
      val age: Int = cols(2).toInt
      (clazz, age)
    })

    /**
      * groupByKey: groups all values under the same key.
      *
      * Triggers a shuffle — every value for a key is sent to one task.
      */
    val rdd3: RDD[(String, Iterable[Int])] = rdd2.groupByKey()

    /**
      * Notes on the grouped values (iterator-like semantics):
      * 1. may only be traversed once
      * 2. data is not necessarily all loaded into memory
      * 3. there is no direct method to get the length
      * so we accumulate sum and count in a single pass.
      */
    rdd3.map(kv => {
      val clazz: String = kv._1

      // all values belonging to the same key
      val ages: Iterable[Int] = kv._2

      var sum: Double = 0
      var num: Double = 0

      for (age <- ages) {
        sum = sum + age
        num += 1
      }

      // groupByKey never produces an empty group, so num > 0 here
      val avg: Double = sum / num

      (clazz, avg)
    }).foreach(println)

    // Fix: stop the SparkContext to release cluster resources
    sc.stop()
  }
}
