package com.shujia.spark.sql

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object Demo1SparkSession {

  /**
    * Demo of the SparkSession entry point: reads a JSON file of students,
    * runs DataFrame-API and SQL queries over it, and writes one result out
    * as CSV. SparkSession replaces the older SparkConf/SparkContext pair.
    */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .appName("sparkSql")
      .master("local")
      // Number of partitions after a Spark SQL shuffle; the cluster default is 200,
      // which is wasteful for a tiny local demo.
      .config("spark.sql.shuffle.partitions", "1")
      .getOrCreate()

    try {
      // Bring in Spark's implicit conversions ($-column syntax, encoders, etc.)
      import spark.implicits._

      // Read a JSON file (one JSON object per line) into a DataFrame.
      val stuDF: DataFrame = spark.read.json("data/students.json")

      // Inspect the data; show() prints only the first 20 rows by default.
      stuDF.show()

      // Print the inferred schema of the DataFrame.
      stuDF.printSchema()

      // select: project specific columns.
      stuDF.select("name", "id", "clazz").show()

      // $ builds a Column object, which supports expressions;
      // `as` assigns an alias to the resulting column.
      stuDF.select($"name", $"age" + 100 as "age").show()

      // where: filter rows by a column predicate.
      stuDF.where($"age" > 21).show()

      // groupBy + count: number of students per class.
      stuDF.groupBy($"clazz").count().show()

      // Register the DataFrame as a temporary view so it can be queried with SQL.
      stuDF.createOrReplaceTempView("student")

      // Same per-class count, expressed as a SQL query.
      val sparkSqlDF: DataFrame = spark.sql("select clazz,count(1) from student group by clazz")

      sparkSqlDF.show()

      /**
        * SQL logical evaluation order:
        * from --> join --> on --> where --> group by --> having --> select --> order by --> limit
        */

      // Persist the aggregated result as CSV, replacing any previous output.
      sparkSqlDF
        .write
        .mode(SaveMode.Overwrite)
        .csv("data/json")
    } finally {
      // Always release the SparkContext and its resources, even if a stage fails.
      spark.stop()
    }
  }
}
