package com.bigdata.spark.core.rdd.builder

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

/**
 * @author Gerry chan
 * @version 1.0
 * Spark中将RDD转换成DataFrame的两种方法
 */
object RDD2DataFrame {

  /**
   * Demonstrates the two standard ways to convert an RDD into a DataFrame:
   *   1. `createDataFrame(rowRDD, schema)` — explicit `StructType` schema.
   *   2. case class + `toDF()`             — schema inferred via reflection.
   *
   * Expects `datas/people.txt` with lines of the form "name,age".
   */
  def main(args: Array[String]): Unit = {

    val sparkSession = SparkSession.builder()
      .master("local[*]")
      .appName("RDD2DataFrame")
      .getOrCreate()

    // Ensure the SparkSession is stopped even if a job fails; the original
    // code leaked the session (and its local executor threads) on exit.
    try {
      // Method 1: createDataFrame (rowRDD + explicit schema)
      val schema = StructType(
        Seq(
          StructField("name", StringType, nullable = true),
          StructField("age", IntegerType, nullable = true)
        )
      )

      // `trim` tolerates the "name, age" spacing used in the classic
      // people.txt sample before parsing the age as Int.
      val rowRDD: RDD[Row] = sparkSession.sparkContext.textFile("datas/people.txt")
        .map(_.split(","))
        .map(fields => Row(fields(0), fields(1).trim.toInt))

      val frame: DataFrame = sparkSession.createDataFrame(rowRDD, schema)
      frame.show()

      // Method 2: case class + toDF (schema inferred by reflection).
      // The implicits import is required so RDD gains the toDF method.
      import sparkSession.implicits._
      val frame1: DataFrame = sparkSession.sparkContext.textFile("datas/people.txt", 2)
        .map(_.split(","))
        .map(fields => Person(fields(0), fields(1).trim.toInt))
        .toDF()
      frame1.show()
    } finally {
      sparkSession.stop()
    }
  }

  // Defined at object level, NOT inside main: a case class declared inside a
  // method cannot be used with toDF, because Spark's reflection-based Encoder
  // needs a stable, top-level type.
  case class Person(name: String, age: Int)
}
