package com.shujia.spark.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{expressions, _}

object Demo3DataFrameAPI {

  /**
    * Demonstrates the Spark SQL DataFrame API:
    * select / where / groupBy / agg, joins, sorting, window functions,
    * raw SQL via temp views, explode, and broadcast (map-side) joins.
    *
    * Reads sample data from the local `data/` directory; most example
    * actions are left commented out so they can be enabled one at a time.
    */
  def main(args: Array[String]): Unit = {

    val spark = SparkSession
      .builder()
      .appName("api")
      .master("local")
      // Small local dataset: reduce shuffle partitions from the default 200.
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    // Implicit conversions (enables the $"col" column syntax, etc.).
    import spark.implicits._
    // All built-in Spark SQL functions (count, avg, sum, explode, ...).
    import org.apache.spark.sql.functions._

    val student: DataFrame = spark.read.json("data/students.json")


    /**
      * show is effectively an action.
      */
    // Show the first 20 rows (default).
    //student.show()

    // Show a specific number of rows.
    //student.show(100)

    // Show full cell contents without truncation.
    //student.show(false)

    /*
    select
     */
    // Column-object style: allows expressions such as $"age" + 1.
    //student.select($"name", $"age" + 1 as "age").show()
    // Plain column-name style: no computation possible.
    //student.select("name", "clazz").show()

    // SQL-expression style.
    //student.selectExpr("name","age + 1 as age").show()

    //student.select(expr("name"),expr("age + 1 as age") ).show()

    /**
      * where
      */

    //student.where($"age" > 23).show()

    //student.where("age < 22").show()

    /**
      * group
      */
    //student.groupBy($"clazz").count().show()
    //student.groupBy("clazz").count().show()

    /**
      * agg: aggregation
      */
    //student.agg(count($"id") as "num").show()

    // Number of students per class.
    student
      .groupBy("clazz")
      .agg(count("clazz") as "num")
      //.show()

    // Average age per class.
    student
      .groupBy("clazz")
      .agg(avg("age") as "avgAge")
      //.show()


    // Scores file has no header; supply the schema explicitly.
    val score: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("sid STRING,cid STRING,sco DOUBLE")
      .load("data/score.txt")

    val stuAS: Dataset[Row] = student.as("stu")
    val scoAS: Dataset[Row] = score.as("sco")


    // Aliases disambiguate when both sides share a column name.
    //val joinDF: DataFrame = stuAS.join(scoAS,$"stu.id" === $"sco.id","inner")

    //joinDF.show()

    //student.join(score,List("id")).show()

    student.join(score, $"id" === $"sid", "inner") //.show()


    /**
      * sort
      */

    score
      .groupBy($"sid")
      .agg(sum($"sco") as "sumSco")
      .sort($"sumSco".desc) // descending; .desc avoids the deprecated postfix-operator syntax
      //.show()

    /**
      * Window (analytic) functions: over
      */

    // Top 3 students per class by total score.
    score
      .groupBy($"sid")
      .agg(sum($"sco") as "sumScore")  // total score per student
      .join(student, $"sid" === $"id")   // join back to the student table
      .select($"sid", $"name", $"clazz", $"sumScore", row_number() over Window.partitionBy($"clazz").orderBy($"sumScore".desc) as "r")
      .where($"r" <= 3)
      //.show()

    /**
      * withColumn: add a derived column
      */

    student
      .join(score, $"sid" === $"id")
      .groupBy($"sid", $"clazz")
      .agg(sum($"sco") as "sumSco")
      .withColumn("r", row_number() over Window.partitionBy($"clazz").orderBy($"sumSco".desc))
      .where($"r" <= 3)
      //.show()


    /**
      * Raw SQL over temp views (top 10 per class here).
      */

    student.createOrReplaceTempView("student")
    score.createOrReplaceTempView("score")

    spark.sql(
      """
        |select * from(
        |select id,name,clazz,sumSco,row_number() over(partition by clazz order by sumSco desc) as r from (
        |select a.id,a.name,a.clazz,sum(b.sco) as sumSco from student as a
        |join
        |score as b
        |on a.id=b.sid
        |group by a.id,a.clazz,a.name))
        |where r<=10
        |
      """.stripMargin) //.show()

    /**
      * explode: word count from lines of text
      */

    val linesDF: DataFrame = spark.read
      .format("csv")
      .option("sep", "\t")
      .schema("lines STRING")
      .load("data/words.txt")

    linesDF
      .select(explode(split($"lines", ",")) as "word")
      .groupBy($"word")
      .agg(count("word") as "c")
      //.show()


    /**
      * Map-side (broadcast) join.
      *
      * Tables up to roughly 100 MB are suitable for broadcasting.
      */

    student.hint("broadcast").join(score, $"id" === $"sid", "inner").show()

  }

}
