package com.shujia.spark.sql

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

/**
 * Demonstrates converting a DataFrame back to an RDD[Row] and extracting
 * typed fields from each Row, using two styles:
 *   1. `row.getAs[T]("col")` — lookup by column name (order-independent, preferred).
 *   2. `case Row(...)` positional pattern match — depends on exact column order.
 */
object Demo6DFToRDD {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .appName("rdd")
      .master("local")
      .getOrCreate()

    try {
      val studentDF: DataFrame = spark
        .read
        .format("json")
        .load("data/students.json")

      studentDF.printSchema()

      // Convert the DataFrame to an RDD of generic Rows.
      val studentRDD: RDD[Row] = studentDF.rdd

      // Style 1: extract each field by column name — robust to column reordering.
      val stuRDD: RDD[(String, String, Long, String, String)] = studentRDD.map((row: Row) => {
        val id: String = row.getAs[String]("id")
        val name: String = row.getAs[String]("name")
        val age: Long = row.getAs[Long]("age")
        val gender: String = row.getAs[String]("gender")
        val clazz: String = row.getAs[String]("clazz")
        (id, name, age, gender, clazz)
      })

      //stuRDD.foreach(println)

      // Style 2: positional pattern match on Row.
      // WARNING: field order must match the schema exactly — Spark sorts JSON
      // columns alphabetically (age, clazz, gender, id, name). The match is
      // also non-exhaustive: a row with a null field (or a schema change)
      // throws scala.MatchError. Prefer the getAs-by-name style above.
      val caseRDD: RDD[(String, String, Long, String, String)] = studentRDD.map {
        case Row(age: Long, clazz: String, gender: String, id: String, name: String) =>
          (id, name, age, gender, clazz)
      }

      caseRDD.foreach(println)
    } finally {
      // Release the SparkSession (and its underlying SparkContext) even if
      // the job fails — the original leaked it by never calling stop().
      spark.stop()
    }
  }

}
