package com.bigdata.sql

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

object Demo6DFToRDD {

  /**
   * Demo: converting a DataFrame to an RDD[Row] and processing it with the
   * RDD API (here: counting students per class).
   */
  def main(args: Array[String]): Unit = {
    // Local single-threaded session — this is a demo, not a cluster job.
    val spark: SparkSession = SparkSession
      .builder()
      .appName("rdd")
      .master("local")
      .getOrCreate()

    import spark.implicits._

    // Read student records from JSON; the schema is inferred from the data.
    val studentDF: DataFrame = spark
      .read
      .format("json")
      .load("data/students.json")

    studentDF.printSchema()

    // Convert the DataFrame to an RDD of generic Rows.
    val stuRDD: RDD[Row] = studentDF.rdd

    // Alternative: pattern-match on Row. Note the field order must match the
    // inferred schema (alphabetical for JSON: age, clazz, gender, id, name).
//    val mapRDD: RDD[(String, String, Long, String, String)] = stuRDD.map {
//      case Row(age: Long, clazz: String, gender: String, id: String, name: String) =>
//        (id, name, age, gender, clazz)
//    }

    // Extract fields by column name — order-independent, so it is more robust
    // than positional pattern matching against an inferred schema.
    val mapRDD: RDD[(String, String, Long, String, String)] = stuRDD.map(row => {
      val id: String = row.getAs[String]("id")
      val name: String = row.getAs[String]("name")
      val age: Long = row.getAs[Long]("age")
      val gender: String = row.getAs[String]("gender")
      val clazz: String = row.getAs[String]("clazz")
      (id, name, age, gender, clazz)
    })

    // Count students per class. This only demonstrates the RDD API; in real
    // code a SQL / DataFrame aggregation is simpler and more readable.
    val groRDD: RDD[(String, Iterable[(String, String, Long, String, String)])] =
      mapRDD.groupBy(tup => tup._5)
    groRDD.map(kv => {
      val clazz: String = kv._1
      // Fix: count via Iterable.size directly instead of materializing the
      // whole group with .toList just to take its length.
      val count: Int = kv._2.size
      (clazz, count)
    }).foreach(println)

    // Fix: release the session's resources (local Spark context) on exit.
    spark.stop()
  }

}
