package com.shujia.sql

import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

object Demo1SparkSql {

  /**
    * Entry point: demonstrates basic Spark SQL usage — reading a JSON file
    * into a DataFrame, DSL-style queries (select / where / groupBy), and
    * running raw SQL against a registered temp view.
    */
  def main(args: Array[String]): Unit = {

    val conf: SparkConf = new SparkConf().setMaster("local").setAppName("sql")

    // Set the default number of shuffle partitions (i.e. the number of
    // reducers) for Spark SQL. The default of 200 is wasteful for a small
    // local job.
    conf.set("spark.sql.shuffle.partitions", "2")

    val sc: SparkContext = new SparkContext(conf)

    try {
      // Create the SQL context — the entry point for DataFrame functionality.
      // NOTE(review): SQLContext is deprecated since Spark 2.0; prefer
      // SparkSession.builder() when this code is upgraded.
      val sql: SQLContext = new SQLContext(sc)

      // Read a JSON-lines file; the schema is inferred from the data.
      val student: DataFrame = sql.read.json("spark/data/students.json")

      // A DataFrame is backed by an RDD[Row] underneath:
      // val rdd: RDD[Row] = student.rdd

      student.show()

      student.printSchema()

      // Column projection by name.
      student.select("name", "age").show()

      // Columns can be transformed during selection.
      student.select(student("age") - 1, student("name")).show()

      // Filtering: either a SQL expression string or a Column predicate.
      student.where("age > 22").show()
      student.where(student("age") > 22).show()

      /**
        * HQL logical execution order:
        *
        * from -> join on -> where -> group by -> having -> select -> order by -> limit
        *
        * select clazz,count(1) from student group by clazz
        */

      // groupBy must be followed by an aggregate function (count here).
      student.groupBy("clazz").count().show()

      // Register the DataFrame as a temp view so it can be queried with SQL.
      student.createOrReplaceTempView("student")

      /**
        * count(column) skips rows where that column is null;
        * count(1) / count(*) counts every row.
        */
      val frame: DataFrame = sql.sql("select clazz,count(1) from student group by clazz")

      frame.show()
    } finally {
      // BUG FIX: the original never stopped the SparkContext, leaking the
      // local Spark runtime (threads, UI port, temp dirs). Always release it.
      sc.stop()
    }
  }
}
