package com.shujia.spark.sql

import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo3DFApi {
  /**
    * Demo of the Spark SQL DataFrame API: reading CSV with an explicit
    * schema, select/where/groupBy/agg/join/orderBy, and the equivalent SQL.
    */
  def main(args: Array[String]): Unit = {

    // Build a local SparkSession.
    // spark.sql.shuffle.partitions: number of partitions after a shuffle
    // (default is 200 — far too many for a small local demo).
    val spark: SparkSession = SparkSession.builder()
      .master("local")
      .appName("df")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()


    // Student table, read as CSV.
    val student: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",") // field separator; comma is the default
      // Explicit column names and types; order must match the data file.
      .schema("id STRING, name STRING , age INT , gender  STRING , clazz STRING")
      .load("data/students.txt")

    /**
      * show: behaves like an action — prints a sample of the rows.
      */

    student.show(100)
    // truncate = false: display full cell contents
    student.show(110, false)


    /**
      * select: choose columns
      */
    // By column name
    student.select("id", "name").show()

    import spark.implicits._
    // Using column objects ($"col")
    student.select($"id", $"age").show()

    // Expressions are allowed inside select;
    // `as` renames the resulting column.
    student.select($"id", $"age" + 1 as "age").show()

    /**
      * where: row filtering
      */

    // less than or equal
    student.where($"age" <= 22).show()

    // equal (===, not ==)
    student.where($"age" === 24).show()

    // not equal
    student.where($"age" =!= 24).show()

    // multiple conditions combined with `and`
    student.where($"age" =!= 24 and $"gender" === "男").show()


    /**
      * groupBy: aggregation
      */

    student.groupBy("clazz").avg("age").show()


    // Import all of Spark's built-in SQL functions (avg, count, sum, ...)
    import org.apache.spark.sql.functions._

    student
      .groupBy("clazz")
      .agg(avg($"age") as "avgAge")
      .show()

    student
      .groupBy("clazz")
      .agg(count($"clazz") as "count")
      .show()


    // Grouping by multiple columns
    student
      .groupBy("clazz", "gender")
      .agg(avg($"age") as "avgAge")
      .show()


    /**
      * join
      */


    // Score table
    val score: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",") // field separator; comma is the default
      // Explicit column names and types; order must match the data file.
      .schema("sId STRING, couId STRING , sco INT ")
      .load("data/score.txt")


    // If the join column has the same name in both tables,
    // the name alone is enough:
    //    val joinDF: DataFrame = student.join(score, "id")
    //    joinDF.show()

    // When the join columns have different names,
    // supply an explicit join condition.
    val joinDF: DataFrame = student.join(score, $"id" === $"sid")
    joinDF.show()


    // Total score per student
    student
      .join(score, $"id" === $"sid")
      .groupBy($"id", $"name", $"age", $"gender", $"clazz")
      .agg(sum($"sco") as "sumSco")
      .show()

    /**
      * Equivalent SQL:
      *
      * select id ,name,age,gender,clazz,sum(sco) as "sumSco from
      * student  as a
      * join score  as b
      * on a.id=b.sid
      * group by id ,name,age,gender,clazz
      *
      * SQL logical execution order:
      * from --> join on ->  where  --> group by ---> having  --> select --> order by ---> limit
      */


    /**
      * orderBy: ascending by default; use .desc for descending
      */

    score
      .groupBy($"sid")
      .agg(sum($"sco") as "sumSco")
      .orderBy($"sumSco".desc)
      .show()

    // Release the session (and the underlying SparkContext) —
    // the original leaked it by never calling stop().
    spark.stop()
  }
}
