package com.doit.sparksql.day02

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{DataFrame, SparkSession}


/**
 * Demonstrates the Spark SQL DataFrame (table) API:
 *   1. load CSV data,
 *   2. create a DataFrame,
 *   3. process it with select / where / orderBy / groupBy / agg.
 *
 * @author MDK
 * @since 2022-01-14
 * */
object SQL_TableAPI {
  // Silence Spark's internal logging so the show() output stays readable.
  Logger.getLogger("org").setLevel(Level.ERROR)

  def main(args: Array[String]): Unit = {
    // Environment: local Spark session using all available cores.
    val spark = SparkSession.builder()
      .appName("table-api")
      .master("local[*]")
      .enableHiveSupport()
      .getOrCreate()

    // Implicits for the $"col" / 'col column syntax and the built-in SQL functions.
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Load data: first line is the header, column types are inferred from the data.
    val df: DataFrame = spark.read.option("header", true).option("inferSchema", true).csv("sql_data/csv/b.csv")
    df.show()
    /**
     * +---+----+---+------+----+
      | id|name|age|gender|city|
      +---+----+---+------+----+
      |  1| zss| 23|     F|  SH|
      |  2| zss| 34|     M|  SH|
      |  3| zss| 56|     M|  CD|
      |  4| zss| 47|     F|  CD|
      |  5| zss| 15|     F|  BJ|
      |  6| zss| 26|     M|  BJ|
      +---+----+---+------+----+
     *
     * */

    /*println("------------------------------------获取指定字段-----------------------------------------")
    // String-based select only extracts column values; columns cannot be transformed.
    df.select("id","name","city").show()

    // Column-based select extracts values AND allows transforming the columns.
    df.select($"id",$"name",lower($"city")).show()
    df.select('id+1,'name,lower('city)).show()
    df.select(col("id")+1,col("name"),lower(col("city"))).show()

    println("-------------------------------------起别名--------------------------------------------")
    df.select($"id"+1 as "uid",$"name",lower($"city") as "ucity").show()   // aliases may be given to all columns or only some
    df.selectExpr("id as u_id","name as u_name", "city as u_city").show()
    df.withColumnRenamed("id","uid").withColumnRenamed("name","uname").show()

    println("----------------where条件过滤-------------------")
    df.where("id>3 and age > 30").show()
//    df.where($"id">3 && $"age">30).show()  // equivalent to the expression above

    println("--------------------order by----------------------")
    df.orderBy("age").show()  // age ascending
    df.orderBy($"age".desc).show()     // age descending
    df.orderBy('id, 'age.desc).show()   // id ascending, ties broken by age descending

    println("-----------------------group by------------------------")
    // Group by city and count the people in each city.
    df.groupBy('city).count().show()*/

    // Group by city and compute the average age per city.
    df.groupBy("city").avg("age").show()
    // Group by one column, sum several columns.
    df.groupBy("gender").sum("id", "age").show()

    /* Equivalent SQL:
    * select
    *count(1), gender, city, sum(age), sum(id), avg(age), collect_list(name)
    *from tb_x
    *group by
    *gender
    * */
    // Call several aggregate functions at once.
    // NOTE: the Map-based agg overload cannot express two aggregations on the
    // same column — duplicate Map keys collapse, so Map("age"->"avg", "age"->"max")
    // would silently drop the avg. The (column -> function) varargs overload
    // allows repeating a column, so use it instead.
    df.groupBy("gender").agg("age" -> "avg", "age" -> "max", "name" -> "collect_list").show()
    // The other commonly used form: Column-based aggregate expressions.
    df.groupBy("gender").agg(
      avg("age") as "avg_age",
      max("age") as "max_age",
      min("id") as "min_id",
      sum("age") as "sum_age",
      // lit(value) turns a constant into a column
      count(lit(1)) as "cnt",
      collect_list("name") as "list_names",
      collect_set("name") as "set_names"
    )
      .show()


    println("-----------------------窗口函数-----------------------------")

    spark.close()
  }
}
