package com.shujia.sql

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

object Demo09RDDToDF {
  /**
   * Demonstrates converting between RDDs and DataFrames:
   *   1. RDD -> DF with explicit column names via toDF(names*)
   *   2. RDD -> DF via a case class (column names come from field names)
   *   3. DF  -> RDD via .rdd, extracting columns with Row.getAs
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .appName("Demo09RDDToDF")
      .master("local")
      .config("spark.sql.shuffle.partitions", "2")
      .getOrCreate()

    // Brings in the implicit conversions that add .toDF to RDDs.
    import spark.implicits._

    val stuRDD: RDD[String] = spark.sparkContext.textFile("spark/data/stu/students.txt")

    // RDD -> DF: manually specify the column names
    // NOTE(review): splits(2).toInt will throw on malformed rows — acceptable for a demo,
    // but confirm the input file is clean.
    val stuRDDToDF: DataFrame = stuRDD.map(line => {
      val splits: Array[String] = line.split(",")
      (splits(0), splits(1), splits(2).toInt, splits(3), splits(4))
    }).toDF("id", "name", "age", "gender", "clazz")

    stuRDDToDF.show(10)

    // RDD -> DF: convert via a case class; toDF() derives the
    // column names from the case class field names
    val stuRDDToDFByCaseClass: DataFrame = stuRDD.map(line => {
      val splits: Array[String] = line.split(",")
      Stu(splits(0), splits(1), splits(2).toInt, splits(3), splits(4))
    }).toDF()

    stuRDDToDFByCaseClass.show()

    // DF -> RDD
    val stuDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id String,name String,age Int,gender String,clazz String")
      .load("spark/data/stu/students.txt")

    val rdd: RDD[Row] = stuDF.rdd
    // Extract the id, name, age columns from each Row
    rdd.map(row => {
      val id: String = row.getAs[String]("id")
      val name: String = row.getAs[String]("name")
      val age: Int = row.getAs[Int]("age")
      (id, name, age)
    }).foreach(println)

    // Release local Spark resources (was missing: the session leaked on exit).
    spark.stop()
  }

  // `id` (was `Id`) follows lowerCamelCase and keeps the case-class-derived
  // column name consistent with the manual toDF("id", ...) path above.
  final case class Stu(id: String, name: String, age: Int, gender: String, clazz: String)

}
