package com.shujia.sql

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

/**
 * Demonstrates the three common conversion paths between RDDs and DataFrames:
 *   1. RDD[Tuple]      -> DataFrame via `toDF(colNames*)` (manual column names)
 *   2. RDD[case class] -> DataFrame via `toDF()` (schema inferred from the case class)
 *   3. DataFrame       -> RDD[Row] via `.rdd`
 *
 * Input file layout (CSV, no header): id,name,age,gender,clazz
 */
object Demo07RDDToDF {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .appName("Demo07RDDToDF")
      .master("local")
      .getOrCreate()

    // Required for the toDF(...) syntax on RDDs below.
    import spark.implicits._

    val sc: SparkContext = spark.sparkContext

    try {
      // Read the raw text file as an RDD of lines via the SparkContext.
      val stuRDD: RDD[String] = sc.textFile("spark/data/students.txt")

      // Approach 1: split each line into a tuple, then name the columns
      // explicitly with toDF. Age is parsed to Int so the column is typed.
      val stuRddToDF: DataFrame = stuRDD
        .map(line => {
          val splits: Array[String] = line.split(",")
          (splits(0), splits(1), splits(2).toInt, splits(3), splits(4))
        }).toDF("id", "name", "age", "gender", "clazz")

      stuRddToDF.show()

      // Approach 2: map each line to a case class instance; toDF() infers
      // both column names and types from the case class fields.
      val stuRddToDFByCaseClass: DataFrame = stuRDD
        .map(line => {
          val splits: Array[String] = line.split(",")
          StuRDD(splits(0), splits(1), splits(2).toInt, splits(3), splits(4))
        }).toDF()

      stuRddToDFByCaseClass.show()

      // Approach 3 (reverse direction): load the same file through the
      // DataFrame reader with an explicit schema, then drop back to RDD[Row].
      val stuDF: DataFrame = spark
        .read
        .format("csv")
        .option("sep", ",")
        .schema("id String,name String,age Int,gender String,clazz String")
        .load("spark/data/students.txt")

      val stuDFToRDD: RDD[Row] = stuDF.rdd

      // Columns of a Row are accessed by name with getAs[T].
      stuDFToRDD
        .map(row => {
          val name: String = row.getAs[String]("name")
          name
        })
        .take(10).foreach(println)
    } finally {
      // Always release the SparkSession (and its SparkContext) so the local
      // Spark runtime shuts down cleanly even if a job above fails.
      spark.stop()
    }
  }

  // Schema carrier for approach 2; field names become the DataFrame columns.
  case class StuRDD(id: String, name: String, age: Int, gender: String, clazz: String)

}
