package com.shujia.spark2

import org.apache.spark.sql.{Dataset, SaveMode, SparkSession}
import org.apache.spark.sql.Row

/**
  * Demo of the Spark 2.x `SparkSession` entry point:
  * reading JSON/CSV, registering a temp view, running a window-function
  * SQL query, writing CSV output, and converting a DataFrame to a Dataset.
  */
object Demo2SparkSession {
  def main(args: Array[String]): Unit = {
    // SparkSession is the unified entry point for Spark SQL 2.x (batch and streaming).
    val spark = SparkSession
      .builder()
      .appName("session")
      .master("local")
      .config("spark.sql.shuffle.partitions", "1")
//      .enableHiveSupport() // enable Hive integration
      .getOrCreate() // get an existing session or create a new one

    import spark.implicits._

    val df = spark.read.json("spark/data/students.json")
    //    df.show()

    // The underlying SparkContext is still accessible if the RDD API is needed:
    //    spark.sparkContext.textFile("spark/data/students.txt")
    // NOTE: the original code called textFile("") with an empty path, which
    // would fail with an invalid-input error on the first action; commented out.

    val student = spark
      .read
      .format("csv")
      .option("sep", ",") // field separator
      //.option("inferSchema", "true") // infer column types automatically
      //.option("header", "true") // use the first row of data as column names
      .schema("id STRING , name STRING , age INT , gender STRING , clazz STRING") // explicit column names and types
      .load("spark/data/students.txt")

    student.printSchema()

    student.show()

    // Register a temporary view so the DataFrame can be queried with SQL.
    student.createOrReplaceTempView("student")

    // Top 2 oldest students per class, using the row_number window function.
    val topDF = spark.sql("select * from (select * ,row_number() over(partition by clazz order by age desc) as rank from student) as a where a.rank<=2")


    topDF
      .write
      .mode(SaveMode.Overwrite)
      .option("sep", "\t") // field separator
      // .option("header", "true") // whether to write column names
      .csv("spark/data/topDF")


    /**
      * DataFrame: rows are of type Row (untyped).
      * Dataset: extends the RDD API with DataFrame functionality (typed rows).
      */

    val ds: Dataset[Student] = student.as[Student]

    ds.groupBy($"clazz").count().show()

    ds.filter($"age" > 22).show()

    // Release the session's resources before the JVM exits.
    spark.stop()
  }

  final case class Student(id: String, name: String, age: Int, gender: String, clazz: String)

}
