package com.xinqing.bigdata.sql

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}

/**
  * @Author:CHQ
  * @Date:2020 /8/5 16:03
  * @Description
  */
object Transform {
  /**
    * Demonstrates the round trip RDD -> DataFrame -> Dataset -> RDD using a
    * local Spark session, then prints each record as "id_name_age".
    */
  def main(args: Array[String]): Unit = {
    // Create the SparkSession (required to run Spark SQL).
    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("spark sql test1")
    val sparkSession: SparkSession = SparkSession.builder().config(sparkConf).getOrCreate()

    // Build an RDD of raw tuples via the underlying SparkContext.
    val rdd: RDD[(String, String, Int)] = sparkSession.sparkContext
      .makeRDD(List(("1", "zhangsan", 20), ("2", "lisi", 21), ("3", "wangwu", 22)))

    // RDD -> DataFrame: supply the column names the tuples lack.
    import sparkSession.implicits._
    val dataFrame: DataFrame = rdd.toDF("id", "name", "age")

    // DataFrame -> Dataset: supply the element type the DataFrame lacks.
    val dataSet: Dataset[User] = dataFrame.as[User]

    // Dataset -> RDD of the case class.
    val rdd1: RDD[User] = dataSet.rdd

    // Print the round-tripped records. Expected output:
    // 1_zhangsan_20
    // 2_lisi_21
    // 3_wangwu_22
    // BUG FIX: the original iterated the tuple `rdd`, leaving `rdd1` unused
    // and never exercising the Dataset -> RDD conversion; iterate `rdd1`.
    rdd1.foreach(user => {
      println(s"${user.id}_${user.name}_${user.age}")
    })

    // Release resources.
    sparkSession.stop()
  }
}

// Sample case class mirroring the DataFrame columns (id, name, age).
// Marked `final`: case classes should not be extended.
final case class User(id: String, name: String, age: Int)
