package com.atbeijing.bigdata.spark.sql

import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
 * Demonstrates the conversions between the three Spark abstractions:
 * RDD <=> DataFrame <=> Dataset.
 */
object SparkSQL05_DataFrameAndDataset {

    def main(args: Array[String]): Unit = {

        // Build the local Spark environment.
        val conf = new SparkConf().setMaster("local[*]").setAppName("SparkSQL")
        val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()
        // Required for toDF / toDS / as[...] conversions below.
        import spark.implicits._

        // Source RDD of (id, name, age) tuples.
        val peopleRDD = spark.sparkContext.makeRDD(
            List(
                (1, "zhangsan", 30),
                (2, "lisi", 40),
                (3, "wangwu", 50)
            )
        )

        // RDD => DataFrame: assign column names to the tuple fields.
        val peopleDF = peopleRDD.toDF("id", "name", "age")
        //peopleDF.show()

        // DataFrame => Dataset: bind rows to the Person case class.
        val peopleDS: Dataset[Person] = peopleDF.as[Person]
        //peopleDS.show()

        // Dataset => DataFrame: drop the typed view back to Row.
        val untypedDF: DataFrame = peopleDS.toDF()

        // DataFrame => RDD: rows come back as untyped Row objects.
        val rowRDD: RDD[Row] = untypedDF.rdd

        // RDD => Dataset: map tuples into Person first, then convert.
        val typedDS: Dataset[Person] = peopleRDD
            .map { t => Person(t._1, t._2, t._3) }
            .toDS()

        // Dataset => RDD: a typed Dataset yields a typed RDD.
        val personRDD: RDD[Person] = typedDS.rdd

        // Summary:
        // Dataset[Person] => RDD[Person]
        // Dataset[Row] => DataFrame => RDD[Row]


        spark.stop()
    }
    case class Person( id:Int, name:String, age:Int )
}
