package com.shujia.sql

import org.apache.spark.sql.{SQLContext, SaveMode}
import org.apache.spark.{SparkConf, SparkContext}

object Demo3Sql {

  /**
    * Demonstrates basic Spark SQL usage:
    *  - reading JSON into a DataFrame
    *  - DataFrame transformations (select / filter / groupBy)
    *  - running raw SQL against a registered temp table
    *  - writing a result DataFrame back out as JSON
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setMaster("local") // run locally
      .setAppName("map")

    // Default parallelism for Spark SQL shuffle stages
    conf.set("spark.sql.shuffle.partitions", "4")

    val sc = new SparkContext(conf)

    try {
      // Create the Spark SQL context on top of the SparkContext
      val sqlContext = new SQLContext(sc)

      // Implicit conversions (e.g. $"col" syntax, rdd.toDF) — commonly
      // needed in Spark SQL code, though not strictly used below
      import sqlContext.implicits._

      // Read JSON data and create a DataFrame
      val df = sqlContext.read.json("data/student.json")

      // show() is an action: it triggers execution and prints rows
      df.show()

      // Print the inferred schema (column names and types)
      df.printSchema()

      // Column selection
      df.select("name", "age").show()
      df.select(df("name"), df("age") + 1).show()

      // Filtering — by Column expression and by SQL-string predicate
      df.filter(df("age") > 23).show()
      df.filter("gender = '男'").show()

      // Group by class and count
      df.groupBy("clazz").count().show()

      // Register the DataFrame as a temp table so it can be queried via SQL.
      // NOTE(review): registerTempTable is deprecated as of Spark 2.0 —
      // prefer createOrReplaceTempView if the project's Spark version has it.
      df.registerTempTable("student")

      // Run SQL through the SQLContext; the result is another DataFrame.
      // show(100, false): up to 100 rows, without truncating long values.
      sqlContext
        .sql("select * from student where gender='男'")
        .show(100, false)

      val countDF = sqlContext
        .sql("select clazz ,count(1) from student group by clazz")

      // Save the DataFrame to files; SaveMode.Overwrite replaces any
      // existing output at the target path
      countDF
        .write
        .mode(SaveMode.Overwrite)
        .json("data/sqlout")
    } finally {
      // Always release SparkContext resources, even if a job fails
      sc.stop()
    }
  }

}
