package com.shujia.spark.sql

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

/**
  * Demo: converting between RDD and DataFrame.
  *
  * Reads `data/students.txt` (CSV: id,name,age,gender,clazz), builds a
  * DataFrame from an RDD of tuples, then converts the DataFrame back to an
  * RDD[Row] and extracts the fields two ways: by column name and by pattern
  * matching on Row.
  */
object Demo9DFToRDD {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("rddondf")
      // Small local demo: one shuffle partition keeps output in a single file/task.
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    // Needed for the .toDF(...) syntax on RDDs of tuples.
    import spark.implicits._

    /**
      * Once a SparkSession exists, the SparkContext can be obtained from it directly.
      */
    val sc: SparkContext = spark.sparkContext

    // Read the text file into an RDD of lines.
    val lineRDD: RDD[String] = sc.textFile("data/students.txt")

    // 1. Split each line into its five fields.
    //    NOTE(review): assumes every line has at least 5 comma-separated fields
    //    and that field 3 parses as an Int — malformed lines would throw.
    val studentRDD: RDD[(String, String, Int, String, String)] = lineRDD.map(line => {
      val split: Array[String] = line.split(",")
      (split(0), split(1), split(2).toInt, split(3), split(4))
    })

    /**
      * Convert the RDD into a DataFrame.
      * The column names must match the number and order of the tuple elements.
      */
    val studentDF: DataFrame = studentRDD.toDF("id", "name", "age", "gender", "clazz")
    studentDF.printSchema()
    studentDF.show()

    /**
      * Convert the DataFrame back into an RDD[Row].
      */
    val stuRDD: RDD[Row] = studentDF.rdd

    // 1. Extract fields by column name and type.
    val kvRDD: RDD[(String, String, Int, String, String)] = stuRDD.map(row => {
      // getAs looks the column up by name; the type parameter must match the schema.
      val id: String = row.getAs[String]("id")
      val name: String = row.getAs[String]("name")
      val age: Int = row.getAs[Int]("age")
      val gender: String = row.getAs[String]("gender")
      val clazz: String = row.getAs[String]("clazz")
      (id, name, age, gender, clazz)
    })

    //    kvRDD.foreach(println)


    // 2. Extract fields via pattern matching on Row.
    //    Safe here without a catch-all: every Row comes from the five-column
    //    schema above, so the pattern always matches.
    //    Bug fix: the tuple previously repeated `age` and dropped `gender`
    //    ((id, name, age, age, clazz): (String, String, Int, Int, String));
    //    now it mirrors kvRDD's (id, name, age, gender, clazz).
    val caseRDD: RDD[(String, String, Int, String, String)] = stuRDD.map {
      case Row(id: String, name: String, age: Int, gender: String, clazz: String) =>
        (id, name, age, gender, clazz)
    }

    caseRDD.foreach(println)

  }

}
