package com.shujia.spark.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
  * Demonstrates the core Spark SQL DataFrame API on a small student/score
  * data set: select/where/groupBy/agg, joins with aliases, sorting, window
  * functions (row_number), raw SQL, explode for word-count, and a broadcast
  * (map-side) join hint.
  *
  * Expects local input files: data/students.json, data/score.txt, data/words.txt.
  */
object Demo3DataFrameApi {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .appName("source")
      .master("local")
      // small shuffle partition count: this is a local single-core demo
      .config("spark.sql.shuffle.partitions", "1")
      .getOrCreate()

    // implicit conversions: enables $"col" column syntax, Encoders, etc.
    import spark.implicits._

    // all built-in Spark SQL functions (avg, sum, count, row_number, ...)
    import org.apache.spark.sql.functions._

    val student: DataFrame = spark.read.json("data/students.json")

    /**
      * show: behaves like an action — triggers the job and prints rows.
      */

    // default: prints the first 20 rows
//    student.show()

    // print a specific number of rows
//    student.show(100)

    // truncate = false: print full cell contents without truncation
//    student.show(false)

    /**
      * select: projection
      */
    // column-object style — supports expressions on columns
    student.select($"name", $"age" + 100)
//      .show()

    // plain column names — no expressions possible with this form
    student.select("name", "age")
//      .show()

    // SQL expression strings
    student.selectExpr("name", "age+1100")
//      .show()

    student.select(expr("name"), expr("age+11111") as "age")
//      .show()

    /**
      * where: row filtering
      */
    student.where($"age" > 22)
//      .show()
    student.where("age<23")
//      .show()

    /**
      * groupBy
      */
    student.groupBy($"clazz").count()
//      .show()
    student.groupBy("clazz").count()
//      .show()

    /**
      * agg: aggregation
      */
//    student.agg(count($"id") as "num").show()

    // average age per class
    student
      .groupBy($"clazz")
      .agg(avg($"age") as "avgAge")
//      .show()

    /**
      * join: read a second (csv) data set and join on id
      */

    val score: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id string,cid string,sco double")
      .load("data/score.txt")

    // alias the DataFrames — otherwise the two `id` columns are ambiguous
    val stuAs: Dataset[Row] = student.as("stu")
    val scoAS: Dataset[Row] = score.as("sco")

    val joinDF: DataFrame = stuAs.join(scoAS, $"stu.id" === $"sco.id", "inner")

//    joinDF.show()

    // join on a shared column name — the output keeps a single `id` column
    stuAs.join(scoAS, List("id"), "inner")
//        .show()

    /**
      * sort
      */

    score.groupBy("id")
      .agg(sum($"sco") as "scoSum")
      .sort($"scoSum".desc) // descending order
      .select($"id", $"scoSum")
//      .show()

    /**
      * Window functions
      */
    // top 10 students by total score within each class

    student
      .join(score, "id")
      // group by student (and carry clazz through the aggregation)
      .groupBy($"id", $"clazz")
      // total score per student
      .agg(sum($"sco") as "scoSum")
      // rank students by total score within each class
      .select($"id", $"clazz", $"scoSum", row_number() over Window.partitionBy("clazz").orderBy($"scoSum".desc) as "r")
      .where($"r" <= 10)
//        .show()

    /**
      * withColumn: append a derived column
      */
    student
      .join(score, "id")
      .groupBy("id", "clazz")
      .agg(sum($"sco") as "scoSum")
      .withColumn("r", row_number() over Window.partitionBy($"clazz").orderBy($"scoSum".desc))
      .where($"r" <= 10)
//        .show()

    /**
      * Raw SQL equivalent of the window query above
      */

    student.createOrReplaceTempView("student")
    score.createOrReplaceTempView("score")

    // NOTE: `<= 10` matches the DataFrame versions above (the original `< 10`
    // returned only the top 9 per class).
    val resultDF: DataFrame = spark.sql(
      """
        |select * from
        |(select id, clazz, scoSum, row_number() over (partition by clazz order by scoSum desc) as r from
        |(select t1.id, t1.clazz, sum(t2.sco) as scoSum from student as t1
        |join
        |score as t2
        |on t1.id = t2.id
        |group by t1.id, t1.clazz) as c) as d
        |where d.r <= 10
      """.stripMargin)
    //resultDF.show()

    /**
      * explode: flatten one row into many (word count)
      */
    val linesDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", "\t")
      .schema("lines String")
      .load("data/words.txt")

    linesDF
      .select(explode(split($"lines", ",")) as "word")
      .groupBy($"word")
      .agg(count($"word") as "c")
      .show()

    /**
      * map join (broadcast join)
      *
      * Suitable when one side is small (roughly <= 100 MB): the small table
      * is broadcast to every executor, avoiding a shuffle.
      */
    student.hint("broadcast").join(score, "id").show()
  }
}
