package com.larry.spark.sql

import com.larry.spark.rdd.transform.RDD_Oper_sortByKey_1.User
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

object Sql_Oper_1 {

  /**
   * Demonstrates the three Spark SQL abstractions — RDD, DataFrame, Dataset —
   * and every conversion between them, using a tiny in-memory user list and a
   * JSON input file.
   *
   * Note: `User` here resolves to the case class declared in this file; the
   * same-compilation-unit definition takes precedence over the explicit import
   * of `RDD_Oper_sortByKey_1.User` at the top of the file.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("sql")

    // Create the SparkSession — entry point for the DataFrame/Dataset API.
    val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()

    // Needed for RDD -> DataFrame/Dataset conversions (toDF / toDS / as[T]).
    import spark.implicits._

    // Read a JSON file into a DataFrame (schema inferred from the data).
    val df = spark.read.json("input/user.json")

//    df.show()

    // SQL-style syntax: register a temp view, then query it with spark.sql.
    df.createOrReplaceTempView("user")
//    spark.sql("select avg(age) from user").show()

    // DSL-style syntax: column selection directly on the DataFrame.
//    df.select("username", "age").show()

    // RDD of tuples built from an in-memory list.
    val rdd1: RDD[(Int, String, Int)] = spark.sparkContext.makeRDD(
      List((1, "zhangsan", 30), (2, "lisi", 28), (3, "wangwu", 20)))
//    rdd1.foreach(println)

    // RDD => DataFrame: name the tuple columns explicitly.
    val df1: DataFrame = rdd1.toDF("id", "name", "age")
//    df1.show()

    // DataFrame => Dataset: column names/types must line up with the case class.
    val ds1 = df1.as[User]
//    ds1.show()

    // Dataset => DataFrame: drops the typed view, keeps the schema.
    val df2 = ds1.toDF()

    // DataFrame => RDD: yields an untyped RDD[Row].
    val rdd2 = df2.rdd

    // RDD => Dataset directly: map tuples to the case class first, then toDS.
    val ds2 = rdd1.map {
      case (id, name, age) => User(id, name, age)
    }.toDS()

    // Dataset => RDD: yields a typed RDD[User].
    val rdd3 = ds1.rdd

    // Release resources.
    spark.stop()

  }
}

/**
 * Immutable user record used as the element type of the Dataset conversions.
 *
 * @param id   numeric user identifier
 * @param name user name
 * @param age  user age in years
 */
case class User(
  id: Int,
  name: String,
  age: Int
)