package com.shujia.spark.sql

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

/**
 * Demo: converting between RDD and DataFrame.
 *
 * 1. RDD -> DataFrame via `toDF` with explicit column names.
 * 2. DataFrame -> RDD[Row], extracting fields either by column name
 *    (`getAs`) or by destructuring the Row with pattern matching.
 *
 * Expects `data/students.txt` with lines of the form:
 * id,name,age,gender,clazz
 */
object Demo5DFOnRDD {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("rddondf")
      // Number of partitions after a Spark SQL shuffle (default is 200,
      // far too many for a tiny local demo).
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()
    // Brings in implicits such as `rddToDatasetHolder`, enabling `.toDF`.
    import spark.implicits._

    // The underlying SparkContext for building RDDs.
    val sc: SparkContext = spark.sparkContext

    /**
     * 1. Convert an RDD into a DataFrame.
     */

    val linesRDD: RDD[String] = sc.textFile("data/students.txt")

    // Parse each CSV line into a typed tuple.
    val studentRDD: RDD[(String, String, Int, String, String)] = linesRDD.map((line: String) => {
      val split: Array[String] = line.split(",")
      val id: String = split(0)
      val name: String = split(1)
      val age: Int = split(2).toInt // will throw NumberFormatException on bad data — acceptable for a demo
      val gender: String = split(3)
      val clazz: String = split(4)
      (id, name, age, gender, clazz)
    })
    // RDD -> DataFrame, assigning column names positionally.
    val studentDF: DataFrame = studentRDD.toDF("id", "name", "age", "gender", "clazz")
    studentDF.show()

    /**
     * 2. Convert the DataFrame back into an RDD of Rows.
     */

    val stuRDD: RDD[Row] = studentDF.rdd

    // Extract columns by name and declared type.
    stuRDD.map((row: Row) => {
      val id: String = row.getAs[String]("id")
      val name: String = row.getAs[String]("name")
      (id, name)
    }).foreach(println)

    // Alternative: destructure the whole Row by pattern matching.
    // Relies on the exact column order and types of studentDF;
    // a non-conforming Row would raise a MatchError.
    stuRDD.map {
      case Row(id: String, name: String, age: Int, gender: String, clazz: String) =>
        (id, name, age, gender, clazz)
    }.foreach(println)

    // Release the session's resources (the original leaked the SparkSession).
    spark.stop()
  }

}
