package com.offcn.bigdata.sql.p1

import org.apache.spark.sql.{Column, DataFrame, SparkSession}

/**
  * Spark SQL programming API examples.
  *     Entry point: SparkSession
  */
object _01SparkSQLOps {
    def main(args: Array[String]): Unit = {
        // Local SparkSession for the demo; all output goes to the console.
        val spark = SparkSession.builder()
                    .appName("_01SparkSQLOps")
                    .master("local[*]")
//                    .enableHiveSupport() // required only when integrating with Hive
                    .getOrCreate()
        // Bring $"..." column syntax into scope up front so the whole body can
        // use the idiomatic form instead of `new Column(...)`.
        import spark.implicits._

        val pdf: DataFrame = spark.read.json("file:/E:/data/spark/sql/people.json")
        // Inspect the schema Spark inferred from the JSON.
        pdf.printSchema()
        // Show the rows.
        pdf.show()
        // select name, age from tbl
        // NOTE(review): the original comment said "height" but the code selects
        // "age" — the comment was wrong; behavior is unchanged.
        pdf.select($"name", $"age").show()
        // select name, height + 2 as height from tbl
        pdf.select($"name", ($"height" + 2).as("height")).show()
        // Same query, aliased differently.
        pdf.select($"name", ($"height" + 2).as("heighted")).show()
        // select * from tbl where age between 13 and 19
        pdf.select("name", "age", "height", "province")
//                .where($"age" between(13, 19))        // equivalent DSL form
                .where("age between 13 and 19")
                .show()
        // Aggregation: select province, count(1) from tbl group by province
        pdf.select("province").groupBy("province").count().show()
        /*
            Per-province statistics: head count, max age, min age
                select province, count(1) count, max(age) max_age, min(age) min_age
                from tbl group by province
            Exercise: express the statement above with the Spark SQL DSL
            (domain specific language) — done below after the SQL version.
          */
        // SQL-string API: register a temp view, then run plain SQL against it.
        pdf.createOrReplaceTempView("people")
        spark.sql(
            """
              |select
              |  province,
              |  count(1) count,
              |  max(age) max_age,
              |  min(age) min_age
              |from people
              |group by province
            """.stripMargin).show()
        println("------------------dsl--------------------")
        // DSL equivalent. agg(...) yields columns in declaration order after
        // the groupBy key, so toDF renames: province, max(age), min(age), count(province).
        pdf.groupBy("province")
            .agg(
                "age" -> "max",
                "age" -> "min",
                "province" -> "count"
            ).toDF(
                "province",
                "maxAge",
                "minAge",
                "count"
            ).show()

        spark.stop()
    }
}
