package com.offcn.bigdata.spark.sql.p1

import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}

/**
  * Demonstrates conversions between Spark's programming models:
  * RDD <-> DataFrame <-> Dataset.
  */
object _04ProgramModeConvertionOps {
    def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder()
            .appName("_04ProgramModeConvertionOps")
            .master("local[*]")
            .getOrCreate()
        // Required for the toDF/toDS extension methods and for the implicit
        // Encoder[Person] that as[Person] needs below.
        import spark.implicits._

        // RDD --> DataFrame / Dataset
        val stusRDD = spark.sparkContext.parallelize(List(
            Person("车传广", 23, "男", "辽宁"),
            Person("闫逾恒", 22, "男", "河北"),
            Person("刘博", 20, "男", "天津"),
            Person("王鑫达", 23, "男", "浙江"),
            Person("田志", 23, "男", "河南")
        ))

        // toDF with explicit names overrides the Person field names.
        val pdf: DataFrame = stusRDD.toDF("_c1", "_c2", "_c3", "_c4")
        val pds: Dataset[Person] = stusRDD.toDS()

        // DataFrame --> RDD / Dataset
        pdf.rdd // yields RDD[Row]: per-column static types are lost

        // A DataFrame CAN be converted to a Dataset via as[T], provided the
        // column names match T's field names. `pdf` was renamed to _c1.._c4
        // above, so pdf.as[Person] would fail analysis; convert from a
        // DataFrame that kept the case-class field names instead:
        val pdsFromDF: Dataset[Person] = stusRDD.toDF().as[Person]

        // Dataset --> RDD / DataFrame
        pds.rdd     // RDD[Person]: element type preserved
        pds.toDF()  // columns named after Person's fields

        spark.stop()
    }
}
