package com.shujia.sql

import org.apache.spark.SparkContext
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}

object Demo2SparkSession {

  /**
    * Demo of `SparkSession`, the unified entry point for Spark SQL since
    * Spark 2.0. Shows: reading JSON into a DataFrame, converting it to a
    * typed `Dataset[Student]`, aggregating, using RDD-style transformations,
    * and registering a Dataset as a temporary SQL view.
    */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .appName("sql")
      .master("local")
      // Small local demo: 2 shuffle partitions instead of the default 200
      // avoids pointless task-scheduling overhead.
      .config("spark.sql.shuffle.partitions", 2)
      .getOrCreate()

    // Ensure the session (and its local cluster resources) is always
    // released, even if a stage below throws.
    try {
      val student: DataFrame = spark.read.json("spark/data/students.json")

      //student.show()

      // The legacy SparkContext is still reachable from the session for
      // RDD-level APIs (unused here; shown for reference).
      val sc: SparkContext = spark.sparkContext

      // Implicit conversions needed for `.as[...]` encoders and `$"col"` syntax.
      import spark.implicits._

      // DataFrame -> Dataset: columns are matched to the `Student` case-class
      // fields automatically, by name.
      val ds1: Dataset[Student] = student.as[Student]

      ds1.show()

      ds1.groupBy(ds1("clazz")).count().show()

      // A Dataset also supports RDD-style functional transformations.
      ds1.map(stu => stu.id).show()

      // A Dataset can be registered as a temporary view and queried with SQL.
      ds1.map(stu => (stu.id, stu.name)).createOrReplaceTempView("value")

      spark.sql("select * from value").show()
    } finally {
      spark.stop()
    }
  }

  /**
    * Typed schema for one student record; field names must match the JSON
    * column names in students.json for the `as[Student]` conversion.
    */
  case class Student(id: String, name: String, age: Long, gender: String, clazz: String)

}
