package com.atbeijing.bigdata.spark.mytest.sql

import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
 * Demonstrates the conversions between Spark's three core data abstractions:
 * RDD -> DataFrame -> Dataset and back again.
 *
 * Conversions shown:
 *   - RDD[(Int, String, Int)]  -> DataFrame via `toDF(colNames*)`
 *   - DataFrame                -> RDD[Row]  via `.rdd`
 *   - RDD of tuples            -> Dataset   via `toDS()`
 *   - RDD of case-class values -> Dataset[Student] via `map` + `toDS()`
 *   - DataFrame                -> Dataset[Student] via `as[Student]`
 *   - Dataset                  -> DataFrame via `toDF()`
 */
object SparkSQL_Transform {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("tran")
    val ss: SparkSession = SparkSession.builder().config(conf).getOrCreate()
    // Brings in the encoders and toDF/toDS syntax used below.
    import ss.implicits._

    // Ensure the session (and its SparkContext) is released even if a
    // transformation below throws; the original version leaked it.
    try {
      val r1: RDD[(Int, String, Int)] = ss.sparkContext.makeRDD(List(
        (1, "zhangsan", 30),
        (2, "lisi", 40),
        (3, "wangwu", 50)
      ))

      // RDD -> DataFrame: column names supplied positionally.
      val df1: DataFrame = r1.toDF("id", "name", "age")
      df1.show()

      println("===================================")
      // DataFrame -> RDD[Row]: rows lose compile-time typing.
      val r2: RDD[Row] = df1.rdd
      r2.collect().foreach(println)

      println("===================================")
      // RDD of tuples -> Dataset: columns named _1, _2, _3.
      val ds1: Dataset[(Int, String, Int)] = r1.toDS()
      ds1.show()

      println("===================================")
      // RDD -> Dataset[Student]: map to the case class first so the
      // Dataset carries named, typed columns.
      val ds2: Dataset[Student] = r1.map(w => Student(w._1, w._2, w._3)).toDS()
      ds2.show()

      println("===================================")
      // DataFrame -> typed Dataset: `as[Student]` matches columns by name.
      val ds3: Dataset[Student] = df1.as[Student]
      ds3.select("id").show()

      println("===================================")
      // Dataset -> DataFrame: drops the static type, keeps the schema.
      val df2: DataFrame = ds3.toDF()
      df2.show()
    } finally {
      // Stop the session to free the local executor threads and UI port.
      ss.stop()
    }
  }

  /** Typed row model used for the Dataset conversions above. */
  final case class Student(id: Int, name: String, age: Int)
}
