package day8

import org.apache.spark.sql.SparkSession

/**
 * Demonstrates Spark SQL window (analytic) functions over a small in-memory
 * employee dataset: a running sum of salary within each department.
 */
object Test5_kaiChuang {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder().master("local[*]").appName("yy").getOrCreate()
    import spark.implicits._

    // (id, name, sex, dept, salary) — val: the DataFrame reference is never reassigned.
    val df = List((1, "zs", true, 1, 100), (2, "ls", false, 2, 300), (3, "ww", false, 2, 500),
      (4, "zl", false, 1, 200), (5, "win7", false, 1, 150)).toDF("id", "name", "sex", "dept", "salary")

    // createOrReplaceTempView avoids TempTableAlreadyExistsException on re-registration.
    df.createOrReplaceTempView("t_emp")

    /**
     * sum(salary) over()                               : sum of salary over the whole table
     * sum(salary) over(partition by col)               : sum of salary within each group of col
     * sum(salary) over(partition by dept order by xxx) : running (row-by-row cumulative) sum of
     *                                                    salary within each dept group
     */
    spark.sql(
      """
        |
        |select *,sum(salary)
        |   over(partition by dept order by salary) x1
        |from t_emp
        |
        |""".stripMargin).show()

    spark.stop()
  }
}
