package com.study.spark.scala.rdd_dataframe_dataset

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
 * Demonstrates the conversions between RDD, DataFrame and Dataset:
 *
 * RDD       + schema (each row fixed as a Row)  = DataFrame
 * DataFrame + type   (bound to a case class)    = Dataset
 *
 * @author: stephen.shen
 * @create: 2019-03-11 9:55
 */
object RDDDataFrameDataSetConvert {

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[2]")
      .appName("RDDDataFrameDataSetConvert Test")
      // Disable type inference for *partition columns* only: partition values
      // read from paths stay strings instead of being guessed as numeric/date.
      .config("spark.sql.sources.partitionColumnTypeInference.enabled", "false")
      .getOrCreate()

    // FIX: the third tuple previously used a String id ("1"), which widened the
    // element type to (Any, String, Int) and caused a runtime failure once the
    // IntegerType schema was applied (createDataFrame) or row.getInt(0) was
    // called (RDD2DataSet). Ids are now distinct Ints matching the schema.
    val rdd: RDD[Row] = spark.sparkContext
      .parallelize(Seq((1, "zhang san", 10), (2, "li si", 20), (3, "wang wu", 30)))
      .map(line => Row(line._1, line._2, line._3))

    // RDD => DataFrame
    val df = RDD2DataFrame(spark, rdd)
    // RDD => DataSet
    RDD2DataSet(spark, rdd)
    // DataFrame => DataSet
    val ds = DataFrame2DataSet(spark, df)
    // DataFrame => RDD
    DataFrame2RDD(spark, df)
    // DataSet => DataFrame
    DataSet2DataFrame(spark, ds)
    // DataSet => RDD
    DataSet2RDD(spark, ds)

    spark.stop()
  }

  // NOTE: method names keep their original PascalCase form for backward
  // compatibility with any external callers, although Scala convention is
  // lowerCamelCase (e.g. rddToDataFrame).

  /**
   * Converts an RDD of [[Row]] into a DataFrame by attaching an explicit schema.
   *
   * Alternative approaches (not used here):
   *   - reflection-based inference via `toDS`/`toDF()` on an RDD of case classes;
   *   - `rdd.toDF("id", "name", ...)` to name columns positionally.
   *
   * @param spark active session used to create the DataFrame
   * @param rdd   rows whose fields must match [[structType]] (Int, String, Int)
   * @return the DataFrame built from `rdd` with schema [[structType]]
   */
  def RDD2DataFrame(spark: SparkSession, rdd: RDD[Row]): DataFrame = {
    // Row + explicit schema: no reflection, the caller controls the types.
    val df = spark.createDataFrame(rdd, structType)
    println("RDD2DataFrame Schema")
    df.printSchema()
    df
  }

  /**
   * Converts an RDD of [[Row]] into a typed Dataset by mapping each Row onto
   * the [[Person]] case class and relying on the implicit Encoder.
   *
   * @param spark session providing the implicit encoders
   * @param rdd   rows laid out as (Int id, String name, Int age)
   * @return a Dataset[Person] with the same contents as `rdd`
   */
  def RDD2DataSet(spark: SparkSession, rdd: RDD[Row]): Dataset[Person] = {
    import spark.implicits._
    val ds = rdd.map(row =>
      Person(row.getInt(0), row.getString(1), row.getInt(2)))
      .toDS()
    println("RDD2DataSet Schema")
    ds.printSchema()
    ds
  }

  /**
   * Converts an untyped DataFrame into a typed Dataset via `as[Person]`.
   * The DataFrame's column names and types must line up with [[Person]].
   *
   * @param spark session providing the implicit encoders
   * @param df    DataFrame whose schema matches (id: Int, name: String, age: Int)
   * @return the same data viewed as Dataset[Person]
   */
  def DataFrame2DataSet(spark: SparkSession, df: DataFrame): Dataset[Person] = {
    import spark.implicits._
    val ds = df.as[Person]
    println("DataFrame2DataSet Schema")
    ds.printSchema()
    ds
  }

  /**
   * Extracts the underlying RDD[Row] from a DataFrame and prints one sample row.
   *
   * @param spark unused; kept for signature consistency with the other helpers
   * @param df    source DataFrame, expected layout (Int id, String name, Int age)
   * @return the DataFrame's underlying RDD of rows
   */
  def DataFrame2RDD(spark: SparkSession, df: DataFrame): RDD[Row] = {
    val rdd = df.rdd
    println("DataFrame2RDD ok")
    // take(1) returns an empty array for an empty DataFrame, so this is safe.
    rdd.take(1).foreach(row => println("id: " + row.getInt(0) + ", name: " + row.getString(1) + ", age: " + row.getInt(2)))
    rdd
  }

  /**
   * Drops the type from a Dataset, yielding a DataFrame (Dataset[Row]).
   *
   * @param spark unused; kept for signature consistency with the other helpers
   * @param ds    typed source Dataset
   * @return the untyped DataFrame view of `ds`
   */
  def DataSet2DataFrame(spark: SparkSession, ds: Dataset[Person]): DataFrame = {
    val df = ds.toDF()
    println("DataSet2DataFrame Schema")
    df.printSchema()
    df
  }

  /**
   * Extracts the underlying RDD[Person] from a typed Dataset and prints one
   * sample element.
   *
   * @param spark unused; kept for signature consistency with the other helpers
   * @param ds    typed source Dataset
   * @return the Dataset's underlying RDD of Person instances
   */
  def DataSet2RDD(spark: SparkSession, ds: Dataset[Person]): RDD[Person] = {
    val rdd = ds.rdd
    println("DataSet2RDD ok")
    rdd.take(1).foreach(person => println("id: " + person.id + ", name: " + person.name + ", age: " + person.age))
    rdd
  }

  /** Explicit schema used by [[RDD2DataFrame]]: (id Int, name String, age Int). */
  val structType: StructType = StructType(Array(
    StructField("id", IntegerType, nullable = true),
    StructField("name", StringType, nullable = true),
    StructField("age", IntegerType, nullable = true)))

  /** Immutable record backing the typed Dataset views; fields mirror [[structType]]. */
  final case class Person(id: Int, name: String, age: Int)

}
