package com.ada.spark.sparksql

import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}


object SparkSql03_Transform {

    /** Demonstrates the round-trip conversions between RDD, DataFrame and Dataset. */
    def main(args: Array[String]): Unit = {

        // Build the Spark configuration: run locally using all available cores.
        val conf: SparkConf = new SparkConf().setAppName("SparkSql03_Transform").setMaster("local[*]")

        // Create the SparkSession, the entry point for Spark SQL.
        val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()

        // Bring the implicit conversions (toDF, as[...]) into scope.
        // "spark" here refers to the SparkSession value above, not a package.
        import spark.implicits._

        // Create the source RDD of (id, name, age) tuples.
        val rdd: RDD[(Int, String, Int)] = spark.sparkContext.makeRDD(List((1, "Jack", 20), (2, "Bob", 22), (3, "Lucy", 28)))

        // RDD -> DataFrame: name the columns explicitly.
        val df: DataFrame = rdd.toDF("id", "name", "age")

        // DataFrame -> Dataset: bind each row to the User case class.
        val ds: Dataset[User] = df.as[User]

        // Dataset -> DataFrame: drops the typed view, keeps the schema.
        val df1: DataFrame = ds.toDF()

        // DataFrame -> RDD of untyped Rows.
        val rdd1: RDD[Row] = df1.rdd

        rdd1.foreach(row => {
            // Row fields could also be read by index, e.g.:
            //   row.getInt(0), row.getString(1), row.getInt(2)
            println(row.mkString(","))
        })

        // Release the Spark resources.
        spark.stop()
    }

}

/** Typed record backing the Dataset view: one user with id, name and age. */
final case class User(id: Int, name: String, age: Int)